Index: src/mark-compact.cc |
=================================================================== |
--- src/mark-compact.cc (revision 9327) |
+++ src/mark-compact.cc (working copy) |
@@ -29,18 +29,27 @@ |
#include "compilation-cache.h" |
#include "execution.h" |
-#include "heap-profiler.h" |
#include "gdb-jit.h" |
#include "global-handles.h" |
+#include "heap-profiler.h" |
#include "ic-inl.h" |
+#include "incremental-marking.h" |
#include "liveobjectlist-inl.h" |
#include "mark-compact.h" |
#include "objects-visiting.h" |
+#include "objects-visiting-inl.h" |
#include "stub-cache.h" |
namespace v8 { |
namespace internal { |
+ |
+const char* Marking::kWhiteBitPattern = "00"; |
+const char* Marking::kBlackBitPattern = "10"; |
+const char* Marking::kGreyBitPattern = "11"; |
+const char* Marking::kImpossibleBitPattern = "01"; |
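The four patterns above encode an object's color in two adjacent bitmap |
bits, first bit listed first: white is unmarked, black has only the first |
bit set, grey has both, and "01" cannot occur. A minimal standalone model |
of that encoding (illustration only, not V8 code): |

  #include <cassert> |
  #include <cstdint> |

  enum Color { WHITE = 0x0, BLACK = 0x1, GREY = 0x3 };  // low bit = first bit |

  int main() { |
    uint32_t cell = 0;                          // one 32-bit bitmap cell |
    cell |= static_cast<uint32_t>(GREY) << 4;   // grey object at bit 4: "11" |
    cell |= static_cast<uint32_t>(BLACK) << 8;  // black object at bit 8: "10" |
    // A position is grey exactly when its bit and the next bit are set. |
    uint32_t grey = cell & (cell >> 1); |
    assert((grey & (1u << 4)) != 0);  // the grey object is found |
    assert((grey & (1u << 8)) == 0);  // the black object is not |
    return 0; |
  } |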
+ |
+ |
// ------------------------------------------------------------------------- |
// MarkCompactCollector |
@@ -48,11 +57,11 @@ |
#ifdef DEBUG |
state_(IDLE), |
#endif |
- force_compaction_(false), |
- compacting_collection_(false), |
- compact_on_next_gc_(false), |
- previous_marked_count_(0), |
+ sweep_precisely_(false), |
+ compacting_(false), |
+ collect_maps_(FLAG_collect_maps), |
tracer_(NULL), |
+ migration_slots_buffer_(NULL), |
#ifdef DEBUG |
live_young_objects_size_(0), |
live_old_pointer_objects_size_(0), |
@@ -68,50 +77,383 @@ |
encountered_weak_maps_(NULL) { } |
+#ifdef DEBUG |
+class VerifyMarkingVisitor: public ObjectVisitor { |
+ public: |
+ void VisitPointers(Object** start, Object** end) { |
+ for (Object** current = start; current < end; current++) { |
+ if ((*current)->IsHeapObject()) { |
+ HeapObject* object = HeapObject::cast(*current); |
+ ASSERT(HEAP->mark_compact_collector()->IsMarked(object)); |
+ } |
+ } |
+ } |
+}; |
+ |
+ |
+static void VerifyMarking(Address bottom, Address top) { |
+ VerifyMarkingVisitor visitor; |
+ HeapObject* object; |
+ Address next_object_must_be_here_or_later = bottom; |
+ |
+ for (Address current = bottom; |
+ current < top; |
+ current += kPointerSize) { |
+ object = HeapObject::FromAddress(current); |
+ if (MarkCompactCollector::IsMarked(object)) { |
+ ASSERT(current >= next_object_must_be_here_or_later); |
+ object->Iterate(&visitor); |
+ next_object_must_be_here_or_later = current + object->Size(); |
+ } |
+ } |
+} |
+ |
+ |
+static void VerifyMarking(Page* p) { |
+ VerifyMarking(p->ObjectAreaStart(), p->ObjectAreaEnd()); |
+} |
+ |
+ |
+static void VerifyMarking(NewSpace* space) { |
+ Address end = space->top(); |
+ NewSpacePageIterator it(space->bottom(), end); |
+ // The bottom position is at the start of its page, which allows us to |
+ // use page->body() as the start of the range on every page. |
+ ASSERT_EQ(space->bottom(), |
+ NewSpacePage::FromAddress(space->bottom())->body()); |
+ while (it.has_next()) { |
+ NewSpacePage* page = it.next(); |
+ Address limit = it.has_next() ? page->body_limit() : end; |
+ ASSERT(limit == end || !page->Contains(end)); |
+ VerifyMarking(page->body(), limit); |
+ } |
+} |
+ |
+ |
+static void VerifyMarking(PagedSpace* space) { |
+ PageIterator it(space); |
+ |
+ while (it.has_next()) { |
+ VerifyMarking(it.next()); |
+ } |
+} |
+ |
+ |
+static void VerifyMarking(Heap* heap) { |
+ VerifyMarking(heap->old_pointer_space()); |
+ VerifyMarking(heap->old_data_space()); |
+ VerifyMarking(heap->code_space()); |
+ VerifyMarking(heap->cell_space()); |
+ VerifyMarking(heap->map_space()); |
+ VerifyMarking(heap->new_space()); |
+ |
+ VerifyMarkingVisitor visitor; |
+ |
+ LargeObjectIterator it(heap->lo_space()); |
+ for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { |
+ if (MarkCompactCollector::IsMarked(obj)) { |
+ obj->Iterate(&visitor); |
+ } |
+ } |
+ |
+ heap->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG); |
+} |
+ |
+ |
+class VerifyEvacuationVisitor: public ObjectVisitor { |
+ public: |
+ void VisitPointers(Object** start, Object** end) { |
+ for (Object** current = start; current < end; current++) { |
+ if ((*current)->IsHeapObject()) { |
+ HeapObject* object = HeapObject::cast(*current); |
+ if (MarkCompactCollector::IsOnEvacuationCandidate(object)) { |
+ CHECK(false); |
+ } |
+ } |
+ } |
+ } |
+ |
+ HeapObject* source_; |
+}; |
+ |
+ |
+static void VerifyEvacuation(Address bottom, Address top) { |
+ VerifyEvacuationVisitor visitor; |
+ HeapObject* object; |
+ Address next_object_must_be_here_or_later = bottom; |
+ |
+ for (Address current = bottom; |
+ current < top; |
+ current += kPointerSize) { |
+ object = HeapObject::FromAddress(current); |
+ if (MarkCompactCollector::IsMarked(object)) { |
+ ASSERT(current >= next_object_must_be_here_or_later); |
+ visitor.source_ = object; |
+ object->Iterate(&visitor); |
+ next_object_must_be_here_or_later = current + object->Size(); |
+ } |
+ } |
+} |
+ |
+ |
+static void VerifyEvacuation(Page* p) { |
+ if (p->IsEvacuationCandidate()) return; |
+ |
+ VerifyEvacuation(p->ObjectAreaStart(), p->ObjectAreaEnd()); |
+} |
+ |
+ |
+static void VerifyEvacuation(NewSpace* space) { |
+ // TODO(gc) Verify evacuation for new space. |
+} |
+ |
+ |
+static void VerifyEvacuation(PagedSpace* space) { |
+ PageIterator it(space); |
+ |
+ while (it.has_next()) { |
+ VerifyEvacuation(it.next()); |
+ } |
+} |
+ |
+ |
+static void VerifyEvacuation(Heap* heap) { |
+ VerifyEvacuation(heap->old_pointer_space()); |
+ VerifyEvacuation(heap->old_data_space()); |
+ VerifyEvacuation(heap->code_space()); |
+ VerifyEvacuation(heap->cell_space()); |
+ VerifyEvacuation(heap->map_space()); |
+ VerifyEvacuation(heap->new_space()); |
+ |
+ VerifyEvacuationVisitor visitor; |
+ heap->IterateStrongRoots(&visitor, VISIT_ALL); |
+} |
+#endif |
+ |
+ |
+void MarkCompactCollector::AddEvacuationCandidate(Page* p) { |
+ p->MarkEvacuationCandidate(); |
+ evacuation_candidates_.Add(p); |
+} |
+ |
+ |
+bool MarkCompactCollector::StartCompaction() { |
+ // Don't start compaction if we are in the middle of an incremental |
+ // marking cycle: no slots were recorded for it, so there is nothing to |
+ // update when objects move. |
+ if (!compacting_ && !heap_->incremental_marking()->IsMarking()) { |
+ ASSERT(evacuation_candidates_.length() == 0); |
+ |
+ CollectEvacuationCandidates(heap()->old_pointer_space()); |
+ CollectEvacuationCandidates(heap()->old_data_space()); |
+ CollectEvacuationCandidates(heap()->code_space()); |
+ |
+ heap()->old_pointer_space()->EvictEvacuationCandidatesFromFreeLists(); |
+ heap()->old_data_space()->EvictEvacuationCandidatesFromFreeLists(); |
+ heap()->code_space()->EvictEvacuationCandidatesFromFreeLists(); |
+ |
+ compacting_ = evacuation_candidates_.length() > 0; |
+ } |
+ |
+ return compacting_; |
+} |
+ |
+ |
void MarkCompactCollector::CollectGarbage() { |
// Make sure that Prepare() has been called. The individual steps below will |
// update the state as they proceed. |
ASSERT(state_ == PREPARE_GC); |
ASSERT(encountered_weak_maps_ == Smi::FromInt(0)); |
- // Prepare has selected whether to compact the old generation or not. |
- // Tell the tracer. |
- if (IsCompacting()) tracer_->set_is_compacting(); |
- |
MarkLiveObjects(); |
+ ASSERT(heap_->incremental_marking()->IsStopped()); |
- if (FLAG_collect_maps) ClearNonLiveTransitions(); |
+ if (collect_maps_) ClearNonLiveTransitions(); |
ClearWeakMaps(); |
- SweepLargeObjectSpace(); |
+#ifdef DEBUG |
+ if (FLAG_verify_heap) { |
+ VerifyMarking(heap_); |
+ } |
+#endif |
- if (IsCompacting()) { |
- GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_COMPACT); |
- EncodeForwardingAddresses(); |
+ SweepSpaces(); |
- heap()->MarkMapPointersAsEncoded(true); |
- UpdatePointers(); |
- heap()->MarkMapPointersAsEncoded(false); |
- heap()->isolate()->pc_to_code_cache()->Flush(); |
+ if (!collect_maps_) ReattachInitialMaps(); |
- RelocateObjects(); |
- } else { |
- SweepSpaces(); |
- heap()->isolate()->pc_to_code_cache()->Flush(); |
- } |
+ heap_->isolate()->pc_to_code_cache()->Flush(); |
Finish(); |
- // Save the count of marked objects remaining after the collection and |
- // null out the GC tracer. |
- previous_marked_count_ = tracer_->marked_count(); |
- ASSERT(previous_marked_count_ == 0); |
tracer_ = NULL; |
} |
+#ifdef DEBUG |
+void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) { |
+ PageIterator it(space); |
+ |
+ while (it.has_next()) { |
+ Page* p = it.next(); |
+ ASSERT(p->markbits()->IsClean()); |
+ } |
+} |
+ |
+void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) { |
+ NewSpacePageIterator it(space->bottom(), space->top()); |
+ |
+ while (it.has_next()) { |
+ NewSpacePage* p = it.next(); |
+ ASSERT(p->markbits()->IsClean()); |
+ } |
+} |
+ |
+void MarkCompactCollector::VerifyMarkbitsAreClean() { |
+ VerifyMarkbitsAreClean(heap_->old_pointer_space()); |
+ VerifyMarkbitsAreClean(heap_->old_data_space()); |
+ VerifyMarkbitsAreClean(heap_->code_space()); |
+ VerifyMarkbitsAreClean(heap_->cell_space()); |
+ VerifyMarkbitsAreClean(heap_->map_space()); |
+ VerifyMarkbitsAreClean(heap_->new_space()); |
+ |
+ LargeObjectIterator it(heap_->lo_space()); |
+ for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { |
+ MarkBit mark_bit = Marking::MarkBitFrom(obj); |
+ ASSERT(Marking::IsWhite(mark_bit)); |
+ } |
+} |
+#endif |
+ |
+ |
+static void ClearMarkbits(PagedSpace* space) { |
+ PageIterator it(space); |
+ |
+ while (it.has_next()) { |
+ Bitmap::Clear(it.next()); |
+ } |
+} |
+ |
+ |
+static void ClearMarkbits(NewSpace* space) { |
+ NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd()); |
+ |
+ while (it.has_next()) { |
+ Bitmap::Clear(it.next()); |
+ } |
+} |
+ |
+ |
+static void ClearMarkbits(Heap* heap) { |
+ ClearMarkbits(heap->code_space()); |
+ ClearMarkbits(heap->map_space()); |
+ ClearMarkbits(heap->old_pointer_space()); |
+ ClearMarkbits(heap->old_data_space()); |
+ ClearMarkbits(heap->cell_space()); |
+ ClearMarkbits(heap->new_space()); |
+ |
+ LargeObjectIterator it(heap->lo_space()); |
+ for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { |
+ MarkBit mark_bit = Marking::MarkBitFrom(obj); |
+ mark_bit.Clear(); |
+ mark_bit.Next().Clear(); |
+ } |
+} |
+ |
+ |
+bool Marking::TransferMark(Address old_start, Address new_start) { |
+ // This is only used when resizing an object. |
+ ASSERT(MemoryChunk::FromAddress(old_start) == |
+ MemoryChunk::FromAddress(new_start)); |
+ // If the mark doesn't move, we don't check the color of the object. |
+ // It doesn't matter whether the object is black, since it hasn't changed |
+ // size, so the adjustment to the live data count will be zero anyway. |
+ if (old_start == new_start) return false; |
+ |
+ MarkBit new_mark_bit = MarkBitFrom(new_start); |
+ |
+ if (heap_->incremental_marking()->IsMarking()) { |
+ MarkBit old_mark_bit = MarkBitFrom(old_start); |
+#ifdef DEBUG |
+ ObjectColor old_color = Color(old_mark_bit); |
+#endif |
+ if (Marking::IsBlack(old_mark_bit)) { |
+ Marking::MarkBlack(new_mark_bit); |
+ old_mark_bit.Clear(); |
+ return true; |
+ } else if (Marking::IsGrey(old_mark_bit)) { |
+ old_mark_bit.Next().Clear(); |
+ heap_->incremental_marking()->WhiteToGreyAndPush( |
+ HeapObject::FromAddress(new_start), new_mark_bit); |
+ heap_->incremental_marking()->RestartIfNotMarking(); |
+ } |
+ |
+#ifdef DEBUG |
+ ObjectColor new_color = Color(new_mark_bit); |
+ ASSERT(new_color == old_color); |
+#endif |
+ return false; |
+ } |
+ MarkBit old_mark_bit = MarkBitFrom(old_start); |
+ if (!old_mark_bit.Get()) { |
+ return false; |
+ } |
+ new_mark_bit.Set(); |
+ return true; |
+} |
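TransferMark above is only used when an object is resized in place, so both |
addresses lie on the same chunk; the return value tells the caller whether a |
black mark moved and the live byte count therefore needs fixing up. A |
standalone model of the color transfer rules (illustration only, not V8 |
code): |

  #include <cassert> |

  enum Color { WHITE, GREY, BLACK }; |

  // Returns true when the caller must adjust live byte accounting, |
  // mirroring the black case in TransferMark. |
  bool TransferMarkModel(Color old_color, Color* new_color) { |
    *new_color = old_color;  // the color travels with the object start |
    return old_color == BLACK; |
  } |

  int main() { |
    Color c; |
    assert(TransferMarkModel(BLACK, &c) && c == BLACK); |
    assert(!TransferMarkModel(GREY, &c) && c == GREY); |
    assert(!TransferMarkModel(WHITE, &c) && c == WHITE); |
    return 0; |
  } |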
+ |
+ |
+static const char* AllocationSpaceName(AllocationSpace space) { |
+ switch (space) { |
+ case NEW_SPACE: return "NEW_SPACE"; |
+ case OLD_POINTER_SPACE: return "OLD_POINTER_SPACE"; |
+ case OLD_DATA_SPACE: return "OLD_DATA_SPACE"; |
+ case CODE_SPACE: return "CODE_SPACE"; |
+ case MAP_SPACE: return "MAP_SPACE"; |
+ case CELL_SPACE: return "CELL_SPACE"; |
+ case LO_SPACE: return "LO_SPACE"; |
+ default: |
+ UNREACHABLE(); |
+ } |
+ |
+ return NULL; |
+} |
+ |
+ |
+void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) { |
+ ASSERT(space->identity() == OLD_POINTER_SPACE || |
+ space->identity() == OLD_DATA_SPACE || |
+ space->identity() == CODE_SPACE); |
+ |
+ PageIterator it(space); |
+ int count = 0; |
+ if (it.has_next()) it.next(); // Never compact the first page. |
+ while (it.has_next()) { |
+ Page* p = it.next(); |
+ if (space->IsFragmented(p)) { |
+ AddEvacuationCandidate(p); |
+ count++; |
+ } else { |
+ p->ClearEvacuationCandidate(); |
+ } |
+ } |
+ |
+ if (count > 0 && FLAG_trace_fragmentation) { |
+ PrintF("Collected %d evacuation candidates for space %s\n", |
+ count, |
+ AllocationSpaceName(space->identity())); |
+ } |
+} |
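Candidate selection hinges on PagedSpace::IsFragmented, whose heuristic is |
defined elsewhere and not shown in this patch. The stand-in below only |
illustrates the kind of free-space threshold test involved; the threshold |
value and the inputs are assumptions: |

  #include <cassert> |
  #include <cstdint> |

  // Illustrative stand-in for a page fragmentation test: treat a page as |
  // an evacuation candidate when too large a share of its area is free. |
  static bool IsFragmentedExample(intptr_t free_bytes, intptr_t area_size) { |
    const int kFreenessThreshold = 50;  // percent; assumed value |
    return free_bytes * 100 > area_size * kFreenessThreshold; |
  } |

  int main() { |
    assert(IsFragmentedExample(600, 1000));   // 60% free: candidate |
    assert(!IsFragmentedExample(100, 1000));  // 10% free: left in place |
    return 0; |
  } |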
+ |
+ |
void MarkCompactCollector::Prepare(GCTracer* tracer) { |
+ FLAG_flush_code = false; |
+ |
+ // Disable collection of maps if incremental marking is enabled. |
+ // The map collection algorithm relies on a special map transition tree |
+ // traversal order which is not implemented for incremental marking. |
+ collect_maps_ = FLAG_collect_maps && |
+ !heap()->incremental_marking()->IsMarking(); |
+ |
// Rather than passing the tracer around we stash it in a static member |
// variable. |
tracer_ = tracer; |
@@ -122,14 +464,7 @@ |
#endif |
ASSERT(!FLAG_always_compact || !FLAG_never_compact); |
- compacting_collection_ = |
- FLAG_always_compact || force_compaction_ || compact_on_next_gc_; |
- compact_on_next_gc_ = false; |
- |
- if (FLAG_never_compact) compacting_collection_ = false; |
- if (!heap()->map_space()->MapPointersEncodable()) |
- compacting_collection_ = false; |
- if (FLAG_collect_maps) CreateBackPointers(); |
+ if (collect_maps_) CreateBackPointers(); |
#ifdef ENABLE_GDB_JIT_INTERFACE |
if (FLAG_gdbjit) { |
// If GDBJIT interface is active disable compaction. |
@@ -137,13 +472,28 @@ |
} |
#endif |
+ // Clear marking bits for precise sweeping to collect all garbage. |
+ if (heap()->incremental_marking()->IsMarking() && PreciseSweepingRequired()) { |
+ heap()->incremental_marking()->Abort(); |
+ ClearMarkbits(heap_); |
+ } |
+ |
+ if (!FLAG_never_compact) StartCompaction(); |
+ |
PagedSpaces spaces; |
for (PagedSpace* space = spaces.next(); |
- space != NULL; space = spaces.next()) { |
- space->PrepareForMarkCompact(compacting_collection_); |
+ space != NULL; |
+ space = spaces.next()) { |
+ space->PrepareForMarkCompact(); |
} |
#ifdef DEBUG |
+ if (!heap()->incremental_marking()->IsMarking()) { |
+ VerifyMarkbitsAreClean(); |
+ } |
+#endif |
+ |
+#ifdef DEBUG |
live_bytes_ = 0; |
live_young_objects_size_ = 0; |
live_old_pointer_objects_size_ = 0; |
@@ -168,31 +518,6 @@ |
heap()->isolate()->stub_cache()->Clear(); |
heap()->external_string_table_.CleanUp(); |
- |
- // If we've just compacted old space there's no reason to check the |
- // fragmentation limit. Just return. |
- if (HasCompacted()) return; |
- |
- // We compact the old generation on the next GC if it has gotten too |
- // fragmented (ie, we could recover an expected amount of space by |
- // reclaiming the waste and free list blocks). |
- static const int kFragmentationLimit = 15; // Percent. |
- static const int kFragmentationAllowed = 1 * MB; // Absolute. |
- intptr_t old_gen_recoverable = 0; |
- intptr_t old_gen_used = 0; |
- |
- OldSpaces spaces; |
- for (OldSpace* space = spaces.next(); space != NULL; space = spaces.next()) { |
- old_gen_recoverable += space->Waste() + space->AvailableFree(); |
- old_gen_used += space->Size(); |
- } |
- |
- int old_gen_fragmentation = |
- static_cast<int>((old_gen_recoverable * 100.0) / old_gen_used); |
- if (old_gen_fragmentation > kFragmentationLimit && |
- old_gen_recoverable > kFragmentationAllowed) { |
- compact_on_next_gc_ = true; |
- } |
} |
@@ -261,13 +586,21 @@ |
SharedFunctionInfo* shared = candidate->unchecked_shared(); |
Code* code = shared->unchecked_code(); |
- if (!code->IsMarked()) { |
+ MarkBit code_mark = Marking::MarkBitFrom(code); |
+ if (!code_mark.Get()) { |
shared->set_code(lazy_compile); |
candidate->set_code(lazy_compile); |
} else { |
candidate->set_code(shared->unchecked_code()); |
} |
+ // We are in the middle of a GC cycle so the write barrier in the code |
+ // setter did not record the slot update and we have to do that manually. |
+ Address slot = candidate->address() + JSFunction::kCodeEntryOffset; |
+ Code* target = Code::cast(Code::GetObjectFromEntryAddress(slot)); |
+ isolate_->heap()->mark_compact_collector()-> |
+ RecordCodeEntrySlot(slot, target); |
+ |
candidate = next_candidate; |
} |
@@ -285,7 +618,8 @@ |
SetNextCandidate(candidate, NULL); |
Code* code = candidate->unchecked_code(); |
- if (!code->IsMarked()) { |
+ MarkBit code_mark = Marking::MarkBitFrom(code); |
+ if (!code_mark.Get()) { |
candidate->set_code(lazy_compile); |
} |
@@ -355,14 +689,13 @@ |
// except the maps for the object and its possible substrings might be |
// marked. |
HeapObject* object = HeapObject::cast(*p); |
- MapWord map_word = object->map_word(); |
- map_word.ClearMark(); |
- InstanceType type = map_word.ToMap()->instance_type(); |
+ Map* map = object->map(); |
+ InstanceType type = map->instance_type(); |
if ((type & kShortcutTypeMask) != kShortcutTypeTag) return object; |
Object* second = reinterpret_cast<ConsString*>(object)->unchecked_second(); |
- Heap* heap = map_word.ToMap()->heap(); |
- if (second != heap->raw_unchecked_empty_string()) { |
+ Heap* heap = map->GetHeap(); |
+ if (second != heap->empty_string()) { |
return object; |
} |
@@ -404,14 +737,12 @@ |
FixedArray::BodyDescriptor, |
void>::Visit); |
+ table_.Register(kVisitGlobalContext, &VisitGlobalContext); |
+ |
table_.Register(kVisitFixedDoubleArray, DataObjectVisitor::Visit); |
- table_.Register(kVisitGlobalContext, |
- &FixedBodyVisitor<StaticMarkingVisitor, |
- Context::MarkCompactBodyDescriptor, |
- void>::Visit); |
- |
table_.Register(kVisitByteArray, &DataObjectVisitor::Visit); |
+ table_.Register(kVisitFreeSpace, &DataObjectVisitor::Visit); |
table_.Register(kVisitSeqAsciiString, &DataObjectVisitor::Visit); |
table_.Register(kVisitSeqTwoByteString, &DataObjectVisitor::Visit); |
@@ -456,7 +787,7 @@ |
} |
INLINE(static void VisitPointer(Heap* heap, Object** p)) { |
- MarkObjectByPointer(heap, p); |
+ MarkObjectByPointer(heap->mark_compact_collector(), p, p); |
} |
INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) { |
@@ -466,47 +797,55 @@ |
if (VisitUnmarkedObjects(heap, start, end)) return; |
// We are close to a stack overflow, so just mark the objects. |
} |
- for (Object** p = start; p < end; p++) MarkObjectByPointer(heap, p); |
+ MarkCompactCollector* collector = heap->mark_compact_collector(); |
+ for (Object** p = start; p < end; p++) { |
+ MarkObjectByPointer(collector, start, p); |
+ } |
} |
+ static void VisitGlobalPropertyCell(Heap* heap, RelocInfo* rinfo) { |
+ ASSERT(rinfo->rmode() == RelocInfo::GLOBAL_PROPERTY_CELL); |
+ JSGlobalPropertyCell* cell = |
+ JSGlobalPropertyCell::cast(rinfo->target_cell()); |
+ MarkBit mark = Marking::MarkBitFrom(cell); |
+ heap->mark_compact_collector()->MarkObject(cell, mark); |
+ } |
+ |
static inline void VisitCodeTarget(Heap* heap, RelocInfo* rinfo) { |
ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode())); |
- Code* code = Code::GetCodeFromTargetAddress(rinfo->target_address()); |
- if (FLAG_cleanup_code_caches_at_gc && code->is_inline_cache_stub()) { |
+ Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address()); |
+ if (FLAG_cleanup_code_caches_at_gc && target->is_inline_cache_stub()) { |
IC::Clear(rinfo->pc()); |
// Please note targets for cleared inline caches do not have to be |
// marked since they are contained in HEAP->non_monomorphic_cache(). |
+ target = Code::GetCodeFromTargetAddress(rinfo->target_address()); |
} else { |
- heap->mark_compact_collector()->MarkObject(code); |
+ MarkBit code_mark = Marking::MarkBitFrom(target); |
+ heap->mark_compact_collector()->MarkObject(target, code_mark); |
} |
+ heap->mark_compact_collector()->RecordRelocSlot(rinfo, target); |
} |
- static void VisitGlobalPropertyCell(Heap* heap, RelocInfo* rinfo) { |
- ASSERT(rinfo->rmode() == RelocInfo::GLOBAL_PROPERTY_CELL); |
- Object* cell = rinfo->target_cell(); |
- Object* old_cell = cell; |
- VisitPointer(heap, &cell); |
- if (cell != old_cell) { |
- rinfo->set_target_cell(reinterpret_cast<JSGlobalPropertyCell*>(cell)); |
- } |
- } |
- |
static inline void VisitDebugTarget(Heap* heap, RelocInfo* rinfo) { |
ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) && |
rinfo->IsPatchedReturnSequence()) || |
(RelocInfo::IsDebugBreakSlot(rinfo->rmode()) && |
rinfo->IsPatchedDebugBreakSlotSequence())); |
- HeapObject* code = Code::GetCodeFromTargetAddress(rinfo->call_address()); |
- heap->mark_compact_collector()->MarkObject(code); |
+ Code* target = Code::GetCodeFromTargetAddress(rinfo->call_address()); |
+ MarkBit code_mark = Marking::MarkBitFrom(target); |
+ heap->mark_compact_collector()->MarkObject(target, code_mark); |
+ heap->mark_compact_collector()->RecordRelocSlot(rinfo, target); |
} |
// Mark object pointed to by p. |
- INLINE(static void MarkObjectByPointer(Heap* heap, Object** p)) { |
+ INLINE(static void MarkObjectByPointer(MarkCompactCollector* collector, |
+ Object** anchor_slot, |
+ Object** p)) { |
if (!(*p)->IsHeapObject()) return; |
HeapObject* object = ShortCircuitConsString(p); |
- if (!object->IsMarked()) { |
- heap->mark_compact_collector()->MarkUnmarkedObject(object); |
- } |
+ collector->RecordSlot(anchor_slot, p, object); |
+ MarkBit mark = Marking::MarkBitFrom(object); |
+ collector->MarkObject(object, mark); |
} |
@@ -515,12 +854,15 @@ |
HeapObject* obj)) { |
#ifdef DEBUG |
ASSERT(Isolate::Current()->heap()->Contains(obj)); |
- ASSERT(!obj->IsMarked()); |
+ ASSERT(!HEAP->mark_compact_collector()->IsMarked(obj)); |
#endif |
Map* map = obj->map(); |
- collector->SetMark(obj); |
+ Heap* heap = obj->GetHeap(); |
+ MarkBit mark = Marking::MarkBitFrom(obj); |
+ heap->mark_compact_collector()->SetMark(obj, mark); |
// Mark the map pointer and the body. |
- if (!map->IsMarked()) collector->MarkUnmarkedObject(map); |
+ MarkBit map_mark = Marking::MarkBitFrom(map); |
+ heap->mark_compact_collector()->MarkObject(map, map_mark); |
IterateBody(map, obj); |
} |
@@ -536,9 +878,12 @@ |
MarkCompactCollector* collector = heap->mark_compact_collector(); |
// Visit the unmarked objects. |
for (Object** p = start; p < end; p++) { |
- if (!(*p)->IsHeapObject()) continue; |
- HeapObject* obj = HeapObject::cast(*p); |
- if (obj->IsMarked()) continue; |
+ Object* o = *p; |
+ if (!o->IsHeapObject()) continue; |
+ collector->RecordSlot(start, p, o); |
+ HeapObject* obj = HeapObject::cast(o); |
+ MarkBit mark = Marking::MarkBitFrom(obj); |
+ if (mark.Get()) continue; |
VisitUnmarkedObject(collector, obj); |
} |
return true; |
@@ -567,7 +912,7 @@ |
void> StructObjectVisitor; |
static void VisitJSWeakMap(Map* map, HeapObject* object) { |
- MarkCompactCollector* collector = map->heap()->mark_compact_collector(); |
+ MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector(); |
JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(object); |
// Enqueue weak map in linked list of encountered weak maps. |
@@ -578,25 +923,28 @@ |
// Skip visiting the backing hash table containing the mappings. |
int object_size = JSWeakMap::BodyDescriptor::SizeOf(map, object); |
BodyVisitorBase<StaticMarkingVisitor>::IteratePointers( |
- map->heap(), |
+ map->GetHeap(), |
object, |
JSWeakMap::BodyDescriptor::kStartOffset, |
JSWeakMap::kTableOffset); |
BodyVisitorBase<StaticMarkingVisitor>::IteratePointers( |
- map->heap(), |
+ map->GetHeap(), |
object, |
JSWeakMap::kTableOffset + kPointerSize, |
object_size); |
// Mark the backing hash table without pushing it on the marking stack. |
- ASSERT(!weak_map->unchecked_table()->IsMarked()); |
- ASSERT(weak_map->unchecked_table()->map()->IsMarked()); |
- collector->SetMark(weak_map->unchecked_table()); |
+ ASSERT(!MarkCompactCollector::IsMarked(weak_map->unchecked_table())); |
+ ASSERT(MarkCompactCollector::IsMarked(weak_map->unchecked_table()->map())); |
+ |
+ HeapObject* unchecked_table = weak_map->unchecked_table(); |
+ MarkBit mark_bit = Marking::MarkBitFrom(unchecked_table); |
+ collector->SetMark(unchecked_table, mark_bit); |
} |
static void VisitCode(Map* map, HeapObject* object) { |
reinterpret_cast<Code*>(object)->CodeIterateBody<StaticMarkingVisitor>( |
- map->heap()); |
+ map->GetHeap()); |
} |
// Code flushing support. |
@@ -608,7 +956,7 @@ |
static const int kRegExpCodeThreshold = 5; |
inline static bool HasSourceCode(Heap* heap, SharedFunctionInfo* info) { |
- Object* undefined = heap->raw_unchecked_undefined_value(); |
+ Object* undefined = heap->undefined_value(); |
return (info->script() != undefined) && |
(reinterpret_cast<Script*>(info->script())->source() != undefined); |
} |
@@ -629,7 +977,9 @@ |
// Code is either on stack, in compilation cache or referenced |
// by optimized version of function. |
- if (function->unchecked_code()->IsMarked()) { |
+ MarkBit code_mark = |
+ Marking::MarkBitFrom(function->unchecked_code()); |
+ if (code_mark.Get()) { |
shared_info->set_code_age(0); |
return false; |
} |
@@ -645,7 +995,9 @@ |
inline static bool IsFlushable(Heap* heap, SharedFunctionInfo* shared_info) { |
// Code is either on stack, in compilation cache or referenced |
// by optimized version of function. |
- if (shared_info->unchecked_code()->IsMarked()) { |
+ MarkBit code_mark = |
+ Marking::MarkBitFrom(shared_info->unchecked_code()); |
+ if (code_mark.Get()) { |
shared_info->set_code_age(0); |
return false; |
} |
@@ -658,11 +1010,7 @@ |
// We never flush code for Api functions. |
Object* function_data = shared_info->function_data(); |
- if (function_data->IsHeapObject() && |
- (SafeMap(function_data)->instance_type() == |
- FUNCTION_TEMPLATE_INFO_TYPE)) { |
- return false; |
- } |
+ if (function_data->IsFunctionTemplateInfo()) return false; |
// Only flush code for functions. |
if (shared_info->code()->kind() != Code::FUNCTION) return false; |
@@ -695,40 +1043,9 @@ |
return true; |
} |
- |
- static inline Map* SafeMap(Object* obj) { |
- MapWord map_word = HeapObject::cast(obj)->map_word(); |
- map_word.ClearMark(); |
- map_word.ClearOverflow(); |
- return map_word.ToMap(); |
- } |
- |
- |
- static inline bool IsJSBuiltinsObject(Object* obj) { |
- return obj->IsHeapObject() && |
- (SafeMap(obj)->instance_type() == JS_BUILTINS_OBJECT_TYPE); |
- } |
- |
- |
static inline bool IsValidNotBuiltinContext(Object* ctx) { |
- if (!ctx->IsHeapObject()) return false; |
- |
- Map* map = SafeMap(ctx); |
- Heap* heap = map->heap(); |
- if (!(map == heap->raw_unchecked_function_context_map() || |
- map == heap->raw_unchecked_catch_context_map() || |
- map == heap->raw_unchecked_with_context_map() || |
- map == heap->raw_unchecked_global_context_map())) { |
- return false; |
- } |
- |
- Context* context = reinterpret_cast<Context*>(ctx); |
- |
- if (IsJSBuiltinsObject(context->global())) { |
- return false; |
- } |
- |
- return true; |
+ return ctx->IsContext() && |
+ !Context::cast(ctx)->global()->IsJSBuiltinsObject(); |
} |
@@ -748,13 +1065,15 @@ |
bool is_ascii) { |
// Make sure that the fixed array is in fact initialized on the RegExp. |
// We could potentially trigger a GC when initializing the RegExp. |
- if (SafeMap(re->data())->instance_type() != FIXED_ARRAY_TYPE) return; |
+ if (HeapObject::cast(re->data())->map()->instance_type() != |
+ FIXED_ARRAY_TYPE) return; |
// Make sure this is a RegExp that actually contains code. |
if (re->TypeTagUnchecked() != JSRegExp::IRREGEXP) return; |
Object* code = re->DataAtUnchecked(JSRegExp::code_index(is_ascii)); |
- if (!code->IsSmi() && SafeMap(code)->instance_type() == CODE_TYPE) { |
+ if (!code->IsSmi() && |
+ HeapObject::cast(code)->map()->instance_type() == CODE_TYPE) { |
// Save a copy that can be reinstated if we need the code again. |
re->SetDataAtUnchecked(JSRegExp::saved_code_index(is_ascii), |
code, |
@@ -790,7 +1109,7 @@ |
// If we did not use the code for kRegExpCodeThreshold mark sweep GCs |
// we flush the code. |
static void VisitRegExpAndFlushCode(Map* map, HeapObject* object) { |
- Heap* heap = map->heap(); |
+ Heap* heap = map->GetHeap(); |
MarkCompactCollector* collector = heap->mark_compact_collector(); |
if (!collector->is_code_flushing_enabled()) { |
VisitJSRegExpFields(map, object); |
@@ -807,7 +1126,7 @@ |
static void VisitSharedFunctionInfoAndFlushCode(Map* map, |
HeapObject* object) { |
- MarkCompactCollector* collector = map->heap()->mark_compact_collector(); |
+ MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector(); |
if (!collector->is_code_flushing_enabled()) { |
VisitSharedFunctionInfoGeneric(map, object); |
return; |
@@ -818,7 +1137,7 @@ |
static void VisitSharedFunctionInfoAndFlushCodeGeneric( |
Map* map, HeapObject* object, bool known_flush_code_candidate) { |
- Heap* heap = map->heap(); |
+ Heap* heap = map->GetHeap(); |
SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(object); |
if (shared->IsInobjectSlackTrackingInProgress()) shared->DetachInitialMap(); |
@@ -835,18 +1154,30 @@ |
static void VisitCodeEntry(Heap* heap, Address entry_address) { |
- Object* code = Code::GetObjectFromEntryAddress(entry_address); |
- Object* old_code = code; |
- VisitPointer(heap, &code); |
- if (code != old_code) { |
- Memory::Address_at(entry_address) = |
- reinterpret_cast<Code*>(code)->entry(); |
+ Code* code = Code::cast(Code::GetObjectFromEntryAddress(entry_address)); |
+ MarkBit mark = Marking::MarkBitFrom(code); |
+ heap->mark_compact_collector()->MarkObject(code, mark); |
+ heap->mark_compact_collector()-> |
+ RecordCodeEntrySlot(entry_address, code); |
+ } |
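VisitCodeEntry needs the dedicated RecordCodeEntrySlot because the slot does |
not hold a tagged pointer: it holds the code entry point, a raw address |
inside the Code object's instruction area, from which GetObjectFromEntryAddress |
recovers the object. A standalone sketch of that arithmetic, with kHeaderSize |
as an assumed stand-in for Code::kHeaderSize: |

  #include <cassert> |
  #include <cstdint> |

  const uintptr_t kHeaderSize = 64;  // assumed stand-in for Code::kHeaderSize |

  // Mirrors the idea behind Code::GetObjectFromEntryAddress: recover the |
  // object start from the entry point stored in a code-entry slot. |
  uintptr_t ObjectFromEntry(uintptr_t entry) { return entry - kHeaderSize; } |

  int main() { |
    uintptr_t object_start = 0x10000; |
    uintptr_t entry = object_start + kHeaderSize;  // what the slot stores |
    assert(ObjectFromEntry(entry) == object_start); |
    return 0; |
  } |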
+ |
+ static void VisitGlobalContext(Map* map, HeapObject* object) { |
+ FixedBodyVisitor<StaticMarkingVisitor, |
+ Context::MarkCompactBodyDescriptor, |
+ void>::Visit(map, object); |
+ |
+ MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector(); |
+ for (int idx = Context::FIRST_WEAK_SLOT; |
+ idx < Context::GLOBAL_CONTEXT_SLOTS; |
+ ++idx) { |
+ Object** slot = |
+ HeapObject::RawField(object, FixedArray::OffsetOfElementAt(idx)); |
+ collector->RecordSlot(slot, slot, *slot); |
} |
} |
- |
static void VisitJSFunctionAndFlushCode(Map* map, HeapObject* object) { |
- Heap* heap = map->heap(); |
+ Heap* heap = map->GetHeap(); |
MarkCompactCollector* collector = heap->mark_compact_collector(); |
if (!collector->is_code_flushing_enabled()) { |
VisitJSFunction(map, object); |
@@ -861,7 +1192,9 @@ |
} |
if (!flush_code_candidate) { |
- collector->MarkObject(jsfunction->unchecked_shared()->unchecked_code()); |
+ Code* code = jsfunction->unchecked_shared()->unchecked_code(); |
+ MarkBit code_mark = Marking::MarkBitFrom(code); |
+ heap->mark_compact_collector()->MarkObject(code, code_mark); |
if (jsfunction->unchecked_code()->kind() == Code::OPTIMIZED_FUNCTION) { |
// For optimized functions we should retain both non-optimized version |
@@ -877,7 +1210,11 @@ |
i < count; |
i++) { |
JSFunction* inlined = reinterpret_cast<JSFunction*>(literals->get(i)); |
- collector->MarkObject(inlined->unchecked_shared()->unchecked_code()); |
+ Code* inlined_code = inlined->unchecked_shared()->unchecked_code(); |
+ MarkBit inlined_code_mark = |
+ Marking::MarkBitFrom(inlined_code); |
+ heap->mark_compact_collector()->MarkObject( |
+ inlined_code, inlined_code_mark); |
} |
} |
} |
@@ -902,12 +1239,11 @@ |
static inline void VisitJSFunctionFields(Map* map, |
JSFunction* object, |
bool flush_code_candidate) { |
- Heap* heap = map->heap(); |
- MarkCompactCollector* collector = heap->mark_compact_collector(); |
+ Heap* heap = map->GetHeap(); |
VisitPointers(heap, |
- SLOT_ADDR(object, JSFunction::kPropertiesOffset), |
- SLOT_ADDR(object, JSFunction::kCodeEntryOffset)); |
+ HeapObject::RawField(object, JSFunction::kPropertiesOffset), |
+ HeapObject::RawField(object, JSFunction::kCodeEntryOffset)); |
if (!flush_code_candidate) { |
VisitCodeEntry(heap, object->address() + JSFunction::kCodeEntryOffset); |
@@ -917,29 +1253,39 @@ |
// Visit shared function info to avoid double-checking its |
// flushability. |
SharedFunctionInfo* shared_info = object->unchecked_shared(); |
- if (!shared_info->IsMarked()) { |
+ MarkBit shared_info_mark = Marking::MarkBitFrom(shared_info); |
+ if (!shared_info_mark.Get()) { |
Map* shared_info_map = shared_info->map(); |
- collector->SetMark(shared_info); |
- collector->MarkObject(shared_info_map); |
+ MarkBit shared_info_map_mark = |
+ Marking::MarkBitFrom(shared_info_map); |
+ heap->mark_compact_collector()->SetMark(shared_info, shared_info_mark); |
+ heap->mark_compact_collector()->MarkObject(shared_info_map, |
+ shared_info_map_mark); |
VisitSharedFunctionInfoAndFlushCodeGeneric(shared_info_map, |
shared_info, |
true); |
} |
} |
- VisitPointers(heap, |
- SLOT_ADDR(object, |
- JSFunction::kCodeEntryOffset + kPointerSize), |
- SLOT_ADDR(object, JSFunction::kNonWeakFieldsEndOffset)); |
+ VisitPointers( |
+ heap, |
+ HeapObject::RawField(object, |
+ JSFunction::kCodeEntryOffset + kPointerSize), |
+ HeapObject::RawField(object, |
+ JSFunction::kNonWeakFieldsEndOffset)); |
// Don't visit the next function list field as it is a weak reference. |
+ Object** next_function = |
+ HeapObject::RawField(object, JSFunction::kNextFunctionLinkOffset); |
+ heap->mark_compact_collector()->RecordSlot( |
+ next_function, next_function, *next_function); |
} |
static inline void VisitJSRegExpFields(Map* map, |
HeapObject* object) { |
int last_property_offset = |
JSRegExp::kSize + kPointerSize * map->inobject_properties(); |
- VisitPointers(map->heap(), |
+ VisitPointers(map->GetHeap(), |
SLOT_ADDR(object, JSRegExp::kPropertiesOffset), |
SLOT_ADDR(object, last_property_offset)); |
} |
@@ -995,7 +1341,9 @@ |
void VisitThread(Isolate* isolate, ThreadLocalTop* top) { |
for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) { |
- collector_->MarkObject(it.frame()->unchecked_code()); |
+ Code* code = it.frame()->unchecked_code(); |
+ MarkBit code_bit = Marking::MarkBitFrom(code); |
+ collector_->MarkObject(it.frame()->unchecked_code(), code_bit); |
} |
} |
@@ -1017,8 +1365,10 @@ |
Object* obj = *slot; |
if (obj->IsSharedFunctionInfo()) { |
SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj); |
- collector_->MarkObject(shared->unchecked_code()); |
- collector_->MarkObject(shared); |
+ MarkBit shared_mark = Marking::MarkBitFrom(shared); |
+ MarkBit code_mark = Marking::MarkBitFrom(shared->unchecked_code()); |
+ collector_->MarkObject(shared->unchecked_code(), code_mark); |
+ collector_->MarkObject(shared, shared_mark); |
} |
} |
@@ -1046,12 +1396,16 @@ |
// Ensure that empty descriptor array is marked. Method MarkDescriptorArray |
// relies on it being marked before any other descriptor array. |
- MarkObject(heap()->raw_unchecked_empty_descriptor_array()); |
+ HeapObject* descriptor_array = heap()->empty_descriptor_array(); |
+ MarkBit descriptor_array_mark = Marking::MarkBitFrom(descriptor_array); |
+ MarkObject(descriptor_array, descriptor_array_mark); |
// Make sure we are not referencing the code from the stack. |
ASSERT(this == heap()->mark_compact_collector()); |
for (StackFrameIterator it; !it.done(); it.Advance()) { |
- MarkObject(it.frame()->unchecked_code()); |
+ Code* code = it.frame()->unchecked_code(); |
+ MarkBit code_mark = Marking::MarkBitFrom(code); |
+ MarkObject(code, code_mark); |
} |
// Iterate the archived stacks in all threads to check if |
@@ -1064,7 +1418,7 @@ |
heap()->isolate()->compilation_cache()->IterateFunctions(&visitor); |
heap()->isolate()->handle_scope_implementer()->Iterate(&visitor); |
- ProcessMarkingStack(); |
+ ProcessMarkingDeque(); |
} |
@@ -1088,19 +1442,21 @@ |
// Replace flat cons strings in place. |
HeapObject* object = ShortCircuitConsString(p); |
- if (object->IsMarked()) return; |
+ MarkBit mark_bit = Marking::MarkBitFrom(object); |
+ if (mark_bit.Get()) return; |
Map* map = object->map(); |
// Mark the object. |
- collector_->SetMark(object); |
+ collector_->SetMark(object, mark_bit); |
// Mark the map pointer and body, and push them on the marking stack. |
- collector_->MarkObject(map); |
+ MarkBit map_mark = Marking::MarkBitFrom(map); |
+ collector_->MarkObject(map, map_mark); |
StaticMarkingVisitor::IterateBody(map, object); |
// Mark all the objects reachable from the map and body. May leave |
// overflowed objects in the heap. |
- collector_->EmptyMarkingStack(); |
+ collector_->EmptyMarkingDeque(); |
} |
MarkCompactCollector* collector_; |
@@ -1116,17 +1472,19 @@ |
virtual void VisitPointers(Object** start, Object** end) { |
// Visit all HeapObject pointers in [start, end). |
for (Object** p = start; p < end; p++) { |
- if ((*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked()) { |
+ Object* o = *p; |
+ if (o->IsHeapObject() && |
+ !Marking::MarkBitFrom(HeapObject::cast(o)).Get()) { |
// Check if the symbol being pruned is an external symbol. We need to |
// delete the associated external data as this symbol is going away. |
// Since no objects have yet been moved we can safely access the map of |
// the object. |
- if ((*p)->IsExternalString()) { |
+ if (o->IsExternalString()) { |
heap_->FinalizeExternalString(String::cast(*p)); |
} |
// Set the entry to null_value (as deleted). |
- *p = heap_->raw_unchecked_null_value(); |
+ *p = heap_->null_value(); |
pointers_removed_++; |
} |
} |
@@ -1147,8 +1505,7 @@ |
class MarkCompactWeakObjectRetainer : public WeakObjectRetainer { |
public: |
virtual Object* RetainAs(Object* object) { |
- MapWord first_word = HeapObject::cast(object)->map_word(); |
- if (first_word.IsMarked()) { |
+ if (Marking::MarkBitFrom(HeapObject::cast(object)).Get()) { |
return object; |
} else { |
return NULL; |
@@ -1157,28 +1514,26 @@ |
}; |
-void MarkCompactCollector::MarkUnmarkedObject(HeapObject* object) { |
- ASSERT(!object->IsMarked()); |
+void MarkCompactCollector::ProcessNewlyMarkedObject(HeapObject* object) { |
+ ASSERT(IsMarked(object)); |
ASSERT(HEAP->Contains(object)); |
if (object->IsMap()) { |
Map* map = Map::cast(object); |
if (FLAG_cleanup_code_caches_at_gc) { |
map->ClearCodeCache(heap()); |
} |
- SetMark(map); |
// When map collection is enabled we have to mark through map's transitions |
// in a special way to make transition links weak. |
// Only maps for subclasses of JSReceiver can have transitions. |
STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE); |
- if (FLAG_collect_maps && map->instance_type() >= FIRST_JS_RECEIVER_TYPE) { |
+ if (collect_maps_ && map->instance_type() >= FIRST_JS_RECEIVER_TYPE) { |
MarkMapContents(map); |
} else { |
- marking_stack_.Push(map); |
+ marking_deque_.PushBlack(map); |
} |
} else { |
- SetMark(object); |
- marking_stack_.Push(object); |
+ marking_deque_.PushBlack(object); |
} |
} |
@@ -1187,12 +1542,17 @@ |
// Mark the prototype transitions array but don't push it into the marking stack. |
// This will make references from it weak. We will clean dead prototype |
// transitions in ClearNonLiveTransitions. |
- FixedArray* prototype_transitions = map->unchecked_prototype_transitions(); |
- if (!prototype_transitions->IsMarked()) SetMark(prototype_transitions); |
+ FixedArray* prototype_transitions = map->prototype_transitions(); |
+ MarkBit mark = Marking::MarkBitFrom(prototype_transitions); |
+ if (!mark.Get()) { |
+ mark.Set(); |
+ MemoryChunk::IncrementLiveBytes(prototype_transitions->address(), |
+ prototype_transitions->Size()); |
+ } |
- Object* raw_descriptor_array = |
- *HeapObject::RawField(map, |
- Map::kInstanceDescriptorsOrBitField3Offset); |
+ Object** raw_descriptor_array_slot = |
+ HeapObject::RawField(map, Map::kInstanceDescriptorsOrBitField3Offset); |
+ Object* raw_descriptor_array = *raw_descriptor_array_slot; |
if (!raw_descriptor_array->IsSmi()) { |
MarkDescriptorArray( |
reinterpret_cast<DescriptorArray*>(raw_descriptor_array)); |
@@ -1206,24 +1566,26 @@ |
Object** end_slot = HeapObject::RawField(map, Map::kPointerFieldsEndOffset); |
- StaticMarkingVisitor::VisitPointers(map->heap(), start_slot, end_slot); |
+ StaticMarkingVisitor::VisitPointers(map->GetHeap(), start_slot, end_slot); |
} |
void MarkCompactCollector::MarkDescriptorArray( |
DescriptorArray* descriptors) { |
- if (descriptors->IsMarked()) return; |
+ MarkBit descriptors_mark = Marking::MarkBitFrom(descriptors); |
+ if (descriptors_mark.Get()) return; |
// Empty descriptor array is marked as a root before any maps are marked. |
- ASSERT(descriptors != HEAP->raw_unchecked_empty_descriptor_array()); |
- SetMark(descriptors); |
+ ASSERT(descriptors != heap()->empty_descriptor_array()); |
+ SetMark(descriptors, descriptors_mark); |
FixedArray* contents = reinterpret_cast<FixedArray*>( |
descriptors->get(DescriptorArray::kContentArrayIndex)); |
ASSERT(contents->IsHeapObject()); |
- ASSERT(!contents->IsMarked()); |
+ ASSERT(!IsMarked(contents)); |
ASSERT(contents->IsFixedArray()); |
ASSERT(contents->length() >= 2); |
- SetMark(contents); |
+ MarkBit contents_mark = Marking::MarkBitFrom(contents); |
+ SetMark(contents, contents_mark); |
// Contents contains (value, details) pairs. If the details say that the type |
// of descriptor is MAP_TRANSITION, CONSTANT_TRANSITION, |
// EXTERNAL_ARRAY_TRANSITION or NULL_DESCRIPTOR, we don't mark the value as |
@@ -1233,25 +1595,33 @@ |
// If the pair (value, details) at index i, i+1 is not |
// a transition or null descriptor, mark the value. |
PropertyDetails details(Smi::cast(contents->get(i + 1))); |
+ |
+ Object** slot = contents->data_start() + i; |
+ Object* value = *slot; |
+ if (!value->IsHeapObject()) continue; |
+ |
+ RecordSlot(slot, slot, *slot); |
+ |
if (details.type() < FIRST_PHANTOM_PROPERTY_TYPE) { |
- HeapObject* object = reinterpret_cast<HeapObject*>(contents->get(i)); |
- if (object->IsHeapObject() && !object->IsMarked()) { |
- SetMark(object); |
- marking_stack_.Push(object); |
+ HeapObject* object = HeapObject::cast(value); |
+ MarkBit mark = Marking::MarkBitFrom(HeapObject::cast(object)); |
+ if (!mark.Get()) { |
+ SetMark(HeapObject::cast(object), mark); |
+ marking_deque_.PushBlack(object); |
} |
} |
} |
// The DescriptorArray descriptors contains a pointer to its contents array, |
// but the contents array is already marked. |
- marking_stack_.Push(descriptors); |
+ marking_deque_.PushBlack(descriptors); |
} |
void MarkCompactCollector::CreateBackPointers() { |
HeapObjectIterator iterator(heap()->map_space()); |
- for (HeapObject* next_object = iterator.next(); |
- next_object != NULL; next_object = iterator.next()) { |
- if (next_object->IsMap()) { // Could also be ByteArray on free list. |
+ for (HeapObject* next_object = iterator.Next(); |
+ next_object != NULL; next_object = iterator.Next()) { |
+ if (next_object->IsMap()) { // Could also be a FreeSpace object on the free list. |
Map* map = Map::cast(next_object); |
STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE); |
if (map->instance_type() >= FIRST_JS_RECEIVER_TYPE) { |
@@ -1264,54 +1634,120 @@ |
} |
-static int OverflowObjectSize(HeapObject* obj) { |
- // Recover the normal map pointer, it might be marked as live and |
- // overflowed. |
- MapWord map_word = obj->map_word(); |
- map_word.ClearMark(); |
- map_word.ClearOverflow(); |
- return obj->SizeFromMap(map_word.ToMap()); |
+// Fill the marking deque with grey objects found by the given iterator. |
+// Stop when the deque is full or the end of the space is reached, |
+// whichever comes first. |
+template<class T> |
+static void DiscoverGreyObjectsWithIterator(Heap* heap, |
+ MarkingDeque* marking_deque, |
+ T* it) { |
+ // The caller should ensure that the marking deque is initially not full, |
+ // so that we don't waste effort pointlessly scanning for objects. |
+ ASSERT(!marking_deque->IsFull()); |
+ |
+ Map* filler_map = heap->one_pointer_filler_map(); |
+ for (HeapObject* object = it->Next(); |
+ object != NULL; |
+ object = it->Next()) { |
+ MarkBit markbit = Marking::MarkBitFrom(object); |
+ if ((object->map() != filler_map) && Marking::IsGrey(markbit)) { |
+ Marking::GreyToBlack(markbit); |
+ marking_deque->PushBlack(object); |
+ if (marking_deque->IsFull()) return; |
+ } |
+ } |
} |
-class OverflowedObjectsScanner : public AllStatic { |
- public: |
- // Fill the marking stack with overflowed objects returned by the given |
- // iterator. Stop when the marking stack is filled or the end of the space |
- // is reached, whichever comes first. |
- template<class T> |
- static inline void ScanOverflowedObjects(MarkCompactCollector* collector, |
- T* it) { |
- // The caller should ensure that the marking stack is initially not full, |
- // so that we don't waste effort pointlessly scanning for objects. |
- ASSERT(!collector->marking_stack_.is_full()); |
+static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts); |
- for (HeapObject* object = it->next(); object != NULL; object = it->next()) { |
- if (object->IsOverflowed()) { |
- object->ClearOverflow(); |
- ASSERT(object->IsMarked()); |
- ASSERT(HEAP->Contains(object)); |
- collector->marking_stack_.Push(object); |
- if (collector->marking_stack_.is_full()) return; |
- } |
+ |
+static void DiscoverGreyObjectsOnPage(MarkingDeque* marking_deque, Page* p) { |
+ ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0); |
+ ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); |
+ ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0); |
+ ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0); |
+ |
+ MarkBit::CellType* cells = p->markbits()->cells(); |
+ |
+ int last_cell_index = |
+ Bitmap::IndexToCell( |
+ Bitmap::CellAlignIndex( |
+ p->AddressToMarkbitIndex(p->ObjectAreaEnd()))); |
+ |
+ int cell_index = Page::kFirstUsedCell; |
+ Address cell_base = p->ObjectAreaStart(); |
+ |
+ for (cell_index = Page::kFirstUsedCell; |
+ cell_index < last_cell_index; |
+ cell_index++, cell_base += 32 * kPointerSize) { |
+ ASSERT((unsigned)cell_index == |
+ Bitmap::IndexToCell( |
+ Bitmap::CellAlignIndex( |
+ p->AddressToMarkbitIndex(cell_base)))); |
+ |
+ const MarkBit::CellType current_cell = cells[cell_index]; |
+ if (current_cell == 0) continue; |
+ |
+ const MarkBit::CellType next_cell = cells[cell_index + 1]; |
+ MarkBit::CellType grey_objects = current_cell & |
+ ((current_cell >> 1) | (next_cell << (Bitmap::kBitsPerCell - 1))); |
+ |
+ int offset = 0; |
+ while (grey_objects != 0) { |
+ int trailing_zeros = CompilerIntrinsics::CountTrailingZeros(grey_objects); |
+ grey_objects >>= trailing_zeros; |
+ offset += trailing_zeros; |
+ MarkBit markbit(&cells[cell_index], 1 << offset, false); |
+ ASSERT(Marking::IsGrey(markbit)); |
+ Marking::GreyToBlack(markbit); |
+ Address addr = cell_base + offset * kPointerSize; |
+ marking_deque->PushBlack(HeapObject::FromAddress(addr)); |
+ if (marking_deque->IsFull()) return; |
+ offset += 2; |
+ grey_objects >>= 2; |
} |
+ } |
-}; |
+} |
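The cell arithmetic above selects exactly the "11" (grey) positions: a bit |
belongs to a grey object when the following bit is also set, and for the |
topmost bit of a cell that partner bit lives in the next cell, which is why |
next_cell is shifted in from the left. A standalone worked example of the |
same expression (not V8 code): |

  #include <cassert> |
  #include <cstdint> |

  int main() { |
    const int kBitsPerCell = 32; |
    uint32_t current_cell = 0, next_cell = 0; |
    current_cell |= 3u << 4;                   // grey object at offset 4 |
    current_cell |= 1u << (kBitsPerCell - 1);  // grey object at offset 31... |
    next_cell |= 1u;                           // ...second bit carried over |
    uint32_t grey_objects = current_cell & |
        ((current_cell >> 1) | (next_cell << (kBitsPerCell - 1))); |
    assert(grey_objects & (1u << 4));                   // in-cell grey found |
    assert(grey_objects & (1u << (kBitsPerCell - 1)));  // cross-cell found |
    return 0; |
  } |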
+static void DiscoverGreyObjectsInSpace(Heap* heap, |
+ MarkingDeque* marking_deque, |
+ PagedSpace* space) { |
+ if (!space->was_swept_conservatively()) { |
+ HeapObjectIterator it(space); |
+ DiscoverGreyObjectsWithIterator(heap, marking_deque, &it); |
+ } else { |
+ PageIterator it(space); |
+ while (it.has_next()) { |
+ Page* p = it.next(); |
+ DiscoverGreyObjectsOnPage(marking_deque, p); |
+ if (marking_deque->IsFull()) return; |
+ } |
+ } |
+} |
+ |
+ |
bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) { |
- return (*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked(); |
+ Object* o = *p; |
+ if (!o->IsHeapObject()) return false; |
+ HeapObject* heap_object = HeapObject::cast(o); |
+ MarkBit mark = Marking::MarkBitFrom(heap_object); |
+ return !mark.Get(); |
} |
void MarkCompactCollector::MarkSymbolTable() { |
- SymbolTable* symbol_table = heap()->raw_unchecked_symbol_table(); |
+ SymbolTable* symbol_table = heap()->symbol_table(); |
// Mark the symbol table itself. |
- SetMark(symbol_table); |
+ MarkBit symbol_table_mark = Marking::MarkBitFrom(symbol_table); |
+ SetMark(symbol_table, symbol_table_mark); |
// Explicitly mark the prefix. |
MarkingVisitor marker(heap()); |
symbol_table->IteratePrefix(&marker); |
- ProcessMarkingStack(); |
+ ProcessMarkingDeque(); |
} |
@@ -1324,9 +1760,9 @@ |
MarkSymbolTable(); |
// There may be overflowed objects in the heap. Visit them now. |
- while (marking_stack_.overflowed()) { |
- RefillMarkingStack(); |
- EmptyMarkingStack(); |
+ while (marking_deque_.overflowed()) { |
+ RefillMarkingDeque(); |
+ EmptyMarkingDeque(); |
} |
} |
@@ -1344,9 +1780,13 @@ |
bool group_marked = false; |
for (size_t j = 0; j < entry->length_; j++) { |
Object* object = *objects[j]; |
- if (object->IsHeapObject() && HeapObject::cast(object)->IsMarked()) { |
- group_marked = true; |
- break; |
+ if (object->IsHeapObject()) { |
+ HeapObject* heap_object = HeapObject::cast(object); |
+ MarkBit mark = Marking::MarkBitFrom(heap_object); |
+ if (mark.Get()) { |
+ group_marked = true; |
+ break; |
+ } |
} |
} |
@@ -1355,17 +1795,21 @@ |
continue; |
} |
- // An object in the group is marked, so mark all heap objects in |
- // the group. |
+ // An object in the group is marked, so mark all white heap objects in |
+ // the group grey. |
for (size_t j = 0; j < entry->length_; ++j) { |
- if ((*objects[j])->IsHeapObject()) { |
- MarkObject(HeapObject::cast(*objects[j])); |
+ Object* object = *objects[j]; |
+ if (object->IsHeapObject()) { |
+ HeapObject* heap_object = HeapObject::cast(object); |
+ MarkBit mark = Marking::MarkBitFrom(heap_object); |
+ MarkObject(heap_object, mark); |
} |
} |
- // Once the entire group has been marked, dispose it because it's |
- // not needed anymore. |
+ // Once the entire group has been colored grey, set the object group |
+ // to NULL so it won't be processed again. |
entry->Dispose(); |
+ object_groups->at(i) = NULL; |
} |
object_groups->Rewind(last); |
} |
@@ -1380,7 +1824,7 @@ |
ImplicitRefGroup* entry = ref_groups->at(i); |
ASSERT(entry != NULL); |
- if (!(*entry->parent_)->IsMarked()) { |
+ if (!IsMarked(*entry->parent_)) { |
(*ref_groups)[last++] = entry; |
continue; |
} |
@@ -1389,7 +1833,9 @@ |
// A parent object is marked, so mark all child heap objects. |
for (size_t j = 0; j < entry->length_; ++j) { |
if ((*children[j])->IsHeapObject()) { |
- MarkObject(HeapObject::cast(*children[j])); |
+ HeapObject* child = HeapObject::cast(*children[j]); |
+ MarkBit mark = Marking::MarkBitFrom(child); |
+ MarkObject(child, mark); |
} |
} |
@@ -1405,21 +1851,17 @@ |
// Before: the marking stack contains zero or more heap object pointers. |
// After: the marking stack is empty, and all objects reachable from the |
// marking stack have been marked, or are overflowed in the heap. |
-void MarkCompactCollector::EmptyMarkingStack() { |
- while (!marking_stack_.is_empty()) { |
- while (!marking_stack_.is_empty()) { |
- HeapObject* object = marking_stack_.Pop(); |
+void MarkCompactCollector::EmptyMarkingDeque() { |
+ while (!marking_deque_.IsEmpty()) { |
+ while (!marking_deque_.IsEmpty()) { |
+ HeapObject* object = marking_deque_.Pop(); |
ASSERT(object->IsHeapObject()); |
ASSERT(heap()->Contains(object)); |
- ASSERT(object->IsMarked()); |
- ASSERT(!object->IsOverflowed()); |
+ ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object))); |
- // Because the object is marked, we have to recover the original map |
- // pointer and use it to mark the object's body. |
- MapWord map_word = object->map_word(); |
- map_word.ClearMark(); |
- Map* map = map_word.ToMap(); |
- MarkObject(map); |
+ Map* map = object->map(); |
+ MarkBit map_mark = Marking::MarkBitFrom(map); |
+ MarkObject(map, map_mark); |
StaticMarkingVisitor::IterateBody(map, object); |
} |
@@ -1436,39 +1878,45 @@ |
// before sweeping completes. If sweeping completes, there are no remaining |
// overflowed objects in the heap so the overflow flag on the marking stack |
// is cleared. |
-void MarkCompactCollector::RefillMarkingStack() { |
- ASSERT(marking_stack_.overflowed()); |
+void MarkCompactCollector::RefillMarkingDeque() { |
+ ASSERT(marking_deque_.overflowed()); |
- SemiSpaceIterator new_it(heap()->new_space(), &OverflowObjectSize); |
- OverflowedObjectsScanner::ScanOverflowedObjects(this, &new_it); |
- if (marking_stack_.is_full()) return; |
+ SemiSpaceIterator new_it(heap()->new_space()); |
+ DiscoverGreyObjectsWithIterator(heap(), &marking_deque_, &new_it); |
+ if (marking_deque_.IsFull()) return; |
- HeapObjectIterator old_pointer_it(heap()->old_pointer_space(), |
- &OverflowObjectSize); |
- OverflowedObjectsScanner::ScanOverflowedObjects(this, &old_pointer_it); |
- if (marking_stack_.is_full()) return; |
+ DiscoverGreyObjectsInSpace(heap(), |
+ &marking_deque_, |
+ heap()->old_pointer_space()); |
+ if (marking_deque_.IsFull()) return; |
- HeapObjectIterator old_data_it(heap()->old_data_space(), &OverflowObjectSize); |
- OverflowedObjectsScanner::ScanOverflowedObjects(this, &old_data_it); |
- if (marking_stack_.is_full()) return; |
+ DiscoverGreyObjectsInSpace(heap(), |
+ &marking_deque_, |
+ heap()->old_data_space()); |
+ if (marking_deque_.IsFull()) return; |
- HeapObjectIterator code_it(heap()->code_space(), &OverflowObjectSize); |
- OverflowedObjectsScanner::ScanOverflowedObjects(this, &code_it); |
- if (marking_stack_.is_full()) return; |
+ DiscoverGreyObjectsInSpace(heap(), |
+ &marking_deque_, |
+ heap()->code_space()); |
+ if (marking_deque_.IsFull()) return; |
- HeapObjectIterator map_it(heap()->map_space(), &OverflowObjectSize); |
- OverflowedObjectsScanner::ScanOverflowedObjects(this, &map_it); |
- if (marking_stack_.is_full()) return; |
+ DiscoverGreyObjectsInSpace(heap(), |
+ &marking_deque_, |
+ heap()->map_space()); |
+ if (marking_deque_.IsFull()) return; |
- HeapObjectIterator cell_it(heap()->cell_space(), &OverflowObjectSize); |
- OverflowedObjectsScanner::ScanOverflowedObjects(this, &cell_it); |
- if (marking_stack_.is_full()) return; |
+ DiscoverGreyObjectsInSpace(heap(), |
+ &marking_deque_, |
+ heap()->cell_space()); |
+ if (marking_deque_.IsFull()) return; |
- LargeObjectIterator lo_it(heap()->lo_space(), &OverflowObjectSize); |
- OverflowedObjectsScanner::ScanOverflowedObjects(this, &lo_it); |
- if (marking_stack_.is_full()) return; |
+ LargeObjectIterator lo_it(heap()->lo_space()); |
+ DiscoverGreyObjectsWithIterator(heap(), |
+ &marking_deque_, |
+ &lo_it); |
+ if (marking_deque_.IsFull()) return; |
- marking_stack_.clear_overflowed(); |
+ marking_deque_.ClearOverflowed(); |
} |
@@ -1476,23 +1924,23 @@ |
// stack. Before: the marking stack contains zero or more heap object |
// pointers. After: the marking stack is empty and there are no overflowed |
// objects in the heap. |
-void MarkCompactCollector::ProcessMarkingStack() { |
- EmptyMarkingStack(); |
- while (marking_stack_.overflowed()) { |
- RefillMarkingStack(); |
- EmptyMarkingStack(); |
+void MarkCompactCollector::ProcessMarkingDeque() { |
+ EmptyMarkingDeque(); |
+ while (marking_deque_.overflowed()) { |
+ RefillMarkingDeque(); |
+ EmptyMarkingDeque(); |
} |
} |
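RefillMarkingDeque and ProcessMarkingDeque together form the overflow |
protocol: an object that does not fit on the deque is left grey in the heap |
with the overflowed flag latched, then rediscovered by rescanning the |
spaces. A minimal standalone model of the deque behaviour this relies on |
(the class shape is inferred from its uses in this file, not the real |
implementation): |

  #include <cassert> |
  #include <cstddef> |

  template <typename T, std::size_t N> |
  class BoundedMarkingStackModel { |
   public: |
    BoundedMarkingStackModel() : top_(0), overflowed_(false) {} |
    void PushBlack(T value) { |
      if (top_ == N) { overflowed_ = true; return; }  // dropped: stays grey |
      items_[top_++] = value; |
    } |
    bool IsFull() const { return top_ == N; } |
    bool overflowed() const { return overflowed_; } |
    void ClearOverflowed() { overflowed_ = false; } |
   private: |
    T items_[N]; |
    std::size_t top_; |
    bool overflowed_; |
  }; |

  int main() { |
    BoundedMarkingStackModel<int, 2> deque; |
    deque.PushBlack(1); |
    deque.PushBlack(2); |
    assert(deque.IsFull()); |
    deque.PushBlack(3);  // does not fit: latch overflow, rediscover later |
    assert(deque.overflowed()); |
    return 0; |
  } |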
void MarkCompactCollector::ProcessExternalMarking() { |
bool work_to_do = true; |
- ASSERT(marking_stack_.is_empty()); |
+ ASSERT(marking_deque_.IsEmpty()); |
while (work_to_do) { |
MarkObjectGroups(); |
MarkImplicitRefGroups(); |
- work_to_do = !marking_stack_.is_empty(); |
- ProcessMarkingStack(); |
+ work_to_do = !marking_deque_.IsEmpty(); |
+ ProcessMarkingDeque(); |
} |
} |
@@ -1504,16 +1952,43 @@ |
// with the C stack limit check. |
PostponeInterruptsScope postpone(heap()->isolate()); |
+ bool incremental_marking_overflowed = false; |
+ IncrementalMarking* incremental_marking = heap_->incremental_marking(); |
+ if (incremental_marking->IsMarking()) { |
+ // Finalize incremental marking and check whether there was an overflow. |
+ // Both markers use the grey color to mark overflowed objects, so the |
+ // non-incremental marker can deal with them as if the overflow had |
+ // occurred during normal marking. The incremental marker, however, uses |
+ // a separate marking deque, so its overflow state has to be copied over |
+ // explicitly. |
+ incremental_marking->Finalize(); |
+ incremental_marking_overflowed = |
+ incremental_marking->marking_deque()->overflowed(); |
+ incremental_marking->marking_deque()->ClearOverflowed(); |
+ } else { |
+ // Abort any pending incremental activities, e.g. incremental sweeping. |
+ incremental_marking->Abort(); |
+ } |
+ |
#ifdef DEBUG |
ASSERT(state_ == PREPARE_GC); |
state_ = MARK_LIVE_OBJECTS; |
#endif |
- // The to space contains live objects, the from space is used as a marking |
- // stack. |
- marking_stack_.Initialize(heap()->new_space()->FromSpaceLow(), |
- heap()->new_space()->FromSpaceHigh()); |
+ // The to space contains live objects, a page in from space is used as a |
+ // marking stack. |
+ Address marking_deque_start = heap()->new_space()->FromSpacePageLow(); |
+ Address marking_deque_end = heap()->new_space()->FromSpacePageHigh(); |
+ if (FLAG_force_marking_deque_overflows) { |
+ marking_deque_end = marking_deque_start + 64 * kPointerSize; |
+ } |
+ marking_deque_.Initialize(marking_deque_start, |
+ marking_deque_end); |
+ ASSERT(!marking_deque_.overflowed()); |
- ASSERT(!marking_stack_.overflowed()); |
+ if (incremental_marking_overflowed) { |
+ // There are overflowed objects left in the heap after incremental marking. |
+ marking_deque_.SetOverflowed(); |
+ } |
PrepareForCodeFlushing(); |
@@ -1535,15 +2010,20 @@ |
&IsUnmarkedHeapObject); |
// Then we mark the objects and process the transitive closure. |
heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor); |
- while (marking_stack_.overflowed()) { |
- RefillMarkingStack(); |
- EmptyMarkingStack(); |
+ while (marking_deque_.overflowed()) { |
+ RefillMarkingDeque(); |
+ EmptyMarkingDeque(); |
} |
// Repeat host application specific marking to mark unmarked objects |
// reachable from the weak roots. |
ProcessExternalMarking(); |
+ AfterMarking(); |
+} |
+ |
+ |
+void MarkCompactCollector::AfterMarking() { |
// Object literal map caches reference symbols (cache keys) and maps |
// (cache values). At this point still useful maps have already been |
// marked. Mark the keys for the alive values before we process the |
@@ -1553,7 +2033,7 @@ |
// Prune the symbol table removing all symbols only pointed to by the |
// symbol table. Cannot use symbol_table() here because the symbol |
// table is marked. |
- SymbolTable* symbol_table = heap()->raw_unchecked_symbol_table(); |
+ SymbolTable* symbol_table = heap()->symbol_table(); |
SymbolTableCleaner v(heap()); |
symbol_table->IterateElements(&v); |
symbol_table->ElementsRemoved(v.PointersRemoved()); |
@@ -1582,13 +2062,13 @@ |
Object* raw_context = heap()->global_contexts_list_; |
while (raw_context != heap()->undefined_value()) { |
Context* context = reinterpret_cast<Context*>(raw_context); |
- if (context->IsMarked()) { |
+ if (IsMarked(context)) { |
HeapObject* raw_map_cache = |
HeapObject::cast(context->get(Context::MAP_CACHE_INDEX)); |
// A map cache may be reachable from the stack. In this case |
// it's already transitively marked and it's too late to clean |
// up its parts. |
- if (!raw_map_cache->IsMarked() && |
+ if (!IsMarked(raw_map_cache) && |
raw_map_cache != heap()->undefined_value()) { |
MapCache* map_cache = reinterpret_cast<MapCache*>(raw_map_cache); |
int existing_elements = map_cache->NumberOfElements(); |
@@ -1601,8 +2081,7 @@ |
raw_key == heap()->null_value()) continue; |
STATIC_ASSERT(MapCache::kEntrySize == 2); |
Object* raw_map = map_cache->get(i + 1); |
- if (raw_map->IsHeapObject() && |
- HeapObject::cast(raw_map)->IsMarked()) { |
+ if (raw_map->IsHeapObject() && IsMarked(raw_map)) { |
++used_elements; |
} else { |
// Delete useless entries with unmarked maps. |
@@ -1618,14 +2097,15 @@ |
// extra complexity during GC. We rely on subsequent cache |
// usages (EnsureCapacity) to do this. |
map_cache->ElementsRemoved(existing_elements - used_elements); |
- MarkObject(map_cache); |
+ MarkBit map_cache_markbit = Marking::MarkBitFrom(map_cache); |
+ MarkObject(map_cache, map_cache_markbit); |
} |
} |
} |
// Move to next element in the list. |
raw_context = context->get(Context::NEXT_CONTEXT_LINK); |
} |
- ProcessMarkingStack(); |
+ ProcessMarkingDeque(); |
} |
@@ -1655,27 +2135,26 @@ |
#endif // DEBUG |
-void MarkCompactCollector::SweepLargeObjectSpace() { |
-#ifdef DEBUG |
- ASSERT(state_ == MARK_LIVE_OBJECTS); |
- state_ = |
- compacting_collection_ ? ENCODE_FORWARDING_ADDRESSES : SWEEP_SPACES; |
-#endif |
- // Deallocate unmarked objects and clear marked bits for marked objects. |
- heap()->lo_space()->FreeUnmarkedObjects(); |
-} |
+void MarkCompactCollector::ReattachInitialMaps() { |
+ HeapObjectIterator map_iterator(heap()->map_space()); |
+ for (HeapObject* obj = map_iterator.Next(); |
+ obj != NULL; |
+ obj = map_iterator.Next()) { |
+ if (obj->IsFreeSpace()) continue; |
+ Map* map = Map::cast(obj); |
+ STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE); |
+ if (map->instance_type() < FIRST_JS_RECEIVER_TYPE) continue; |
-// Safe to use during marking phase only. |
-bool MarkCompactCollector::SafeIsMap(HeapObject* object) { |
- MapWord metamap = object->map_word(); |
- metamap.ClearMark(); |
- return metamap.ToMap()->instance_type() == MAP_TYPE; |
+ if (map->attached_to_shared_function_info()) { |
+ JSFunction::cast(map->constructor())->shared()->AttachInitialMap(map); |
+ } |
+ } |
} |
void MarkCompactCollector::ClearNonLiveTransitions() { |
- HeapObjectIterator map_iterator(heap()->map_space(), &SizeOfMarkedObject); |
+ HeapObjectIterator map_iterator(heap()->map_space()); |
// Iterate over the map space, setting map transitions that go from |
// a marked map to an unmarked map to null transitions. At the same time, |
// set all the prototype fields of maps back to their original value, |
@@ -1686,17 +2165,19 @@ |
// scan the descriptor arrays of those maps, not all maps. |
// All of these actions are carried out only on maps of JSObjects |
// and related subtypes. |
- for (HeapObject* obj = map_iterator.next(); |
- obj != NULL; obj = map_iterator.next()) { |
+ for (HeapObject* obj = map_iterator.Next(); |
+ obj != NULL; obj = map_iterator.Next()) { |
Map* map = reinterpret_cast<Map*>(obj); |
- if (!map->IsMarked() && map->IsByteArray()) continue; |
+ MarkBit map_mark = Marking::MarkBitFrom(map); |
+ if (map->IsFreeSpace()) continue; |
- ASSERT(SafeIsMap(map)); |
+ ASSERT(map->IsMap()); |
// Only JSObject and subtypes have map transitions and back pointers. |
STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE); |
if (map->instance_type() < FIRST_JS_RECEIVER_TYPE) continue; |
- if (map->IsMarked() && map->attached_to_shared_function_info()) { |
+ if (map_mark.Get() && |
+ map->attached_to_shared_function_info()) { |
// This map is used for inobject slack tracking and has been detached |
// from SharedFunctionInfo during the mark phase. |
// Since it survived the GC, reattach it now. |
@@ -1705,52 +2186,55 @@ |
// Clear dead prototype transitions. |
int number_of_transitions = map->NumberOfProtoTransitions(); |
- if (number_of_transitions > 0) { |
- FixedArray* prototype_transitions = |
- map->unchecked_prototype_transitions(); |
- int new_number_of_transitions = 0; |
- const int header = Map::kProtoTransitionHeaderSize; |
- const int proto_offset = |
- header + Map::kProtoTransitionPrototypeOffset; |
- const int map_offset = header + Map::kProtoTransitionMapOffset; |
- const int step = Map::kProtoTransitionElementsPerEntry; |
- for (int i = 0; i < number_of_transitions; i++) { |
- Object* prototype = prototype_transitions->get(proto_offset + i * step); |
- Object* cached_map = prototype_transitions->get(map_offset + i * step); |
- if (HeapObject::cast(prototype)->IsMarked() && |
- HeapObject::cast(cached_map)->IsMarked()) { |
- if (new_number_of_transitions != i) { |
- prototype_transitions->set_unchecked( |
- heap_, |
- proto_offset + new_number_of_transitions * step, |
- prototype, |
- UPDATE_WRITE_BARRIER); |
- prototype_transitions->set_unchecked( |
- heap_, |
- map_offset + new_number_of_transitions * step, |
- cached_map, |
- SKIP_WRITE_BARRIER); |
- } |
- new_number_of_transitions++; |
+ FixedArray* prototype_transitions = map->prototype_transitions(); |
+ |
+ int new_number_of_transitions = 0; |
+ const int header = Map::kProtoTransitionHeaderSize; |
+ const int proto_offset = |
+ header + Map::kProtoTransitionPrototypeOffset; |
+ const int map_offset = header + Map::kProtoTransitionMapOffset; |
+ const int step = Map::kProtoTransitionElementsPerEntry; |
+ for (int i = 0; i < number_of_transitions; i++) { |
+ Object* prototype = prototype_transitions->get(proto_offset + i * step); |
+ Object* cached_map = prototype_transitions->get(map_offset + i * step); |
+ if (IsMarked(prototype) && IsMarked(cached_map)) { |
+ if (new_number_of_transitions != i) { |
+ prototype_transitions->set_unchecked( |
+ heap_, |
+ proto_offset + new_number_of_transitions * step, |
+ prototype, |
+ UPDATE_WRITE_BARRIER); |
+ prototype_transitions->set_unchecked( |
+ heap_, |
+ map_offset + new_number_of_transitions * step, |
+ cached_map, |
+ SKIP_WRITE_BARRIER); |
} |
} |
// Fill slots that became free with undefined value. |
- Object* undefined = heap()->raw_unchecked_undefined_value(); |
+ Object* undefined = heap()->undefined_value(); |
for (int i = new_number_of_transitions * step; |
i < number_of_transitions * step; |
i++) { |
+ // The undefined object is on a page that is never compacted and never |
+ // in new space so it is OK to skip the write barrier. Also it's a |
+ // root. |
prototype_transitions->set_unchecked(heap_, |
header + i, |
undefined, |
SKIP_WRITE_BARRIER); |
+ |
+ Object** undefined_slot = |
+ prototype_transitions->data_start() + i; |
+ RecordSlot(undefined_slot, undefined_slot, undefined); |
} |
map->SetNumberOfProtoTransitions(new_number_of_transitions); |
} |
// Follow the chain of back pointers to find the prototype. |
Map* current = map; |
- while (SafeIsMap(current)) { |
+ while (current->IsMap()) { |
current = reinterpret_cast<Map*>(current->prototype()); |
ASSERT(current->IsHeapObject()); |
} |
@@ -1759,21 +2243,28 @@ |
// Follow back pointers, setting them to prototype, |
// clearing map transitions when necessary. |
current = map; |
- bool on_dead_path = !current->IsMarked(); |
+ bool on_dead_path = !map_mark.Get(); |
Object* next; |
- while (SafeIsMap(current)) { |
+ while (current->IsMap()) { |
next = current->prototype(); |
// There should never be a dead map above a live map. |
- ASSERT(on_dead_path || current->IsMarked()); |
+ MarkBit current_mark = Marking::MarkBitFrom(current); |
+ bool is_alive = current_mark.Get(); |
+ ASSERT(on_dead_path || is_alive); |
// A live map above a dead map indicates a dead transition. |
// This test will always be false on the first iteration. |
- if (on_dead_path && current->IsMarked()) { |
+ if (on_dead_path && is_alive) { |
on_dead_path = false; |
current->ClearNonLiveTransitions(heap(), real_prototype); |
} |
*HeapObject::RawField(current, Map::kPrototypeOffset) = |
real_prototype; |
+ |
+ if (is_alive) { |
+ Object** slot = HeapObject::RawField(current, Map::kPrototypeOffset); |
+ RecordSlot(slot, slot, real_prototype); |
+ } |
current = reinterpret_cast<Map*>(next); |
} |
} |
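 |
The prototype-transition cleanup above is an in-place compaction of a |
parallel array: two-slot {prototype, map} entries behind a small header, |
live entries slid toward the front, the freed tail overwritten with a |
filler. A minimal sketch with plain arrays; the constants mirror the |
offsets used above and IsLive() stands in for the mark-bit checks. |
 |
// Stand-ins for Map::kProtoTransitionHeaderSize and friends. |
const int kHeaderSize = 1; |
const int kProtoOffset = 0; |
const int kMapOffset = 1; |
const int kStep = 2;  // Slots per {prototype, map} entry. |
 |
// Compacts live entries toward the front, fills the freed tail with |
// `filler` (the role heap()->undefined_value() plays above) and returns |
// the new number of transitions. |
int CompactTransitions(void** array, int number_of_transitions, |
                       bool (*IsLive)(void*), void* filler) { |
  int new_count = 0; |
  for (int i = 0; i < number_of_transitions; i++) { |
    void* prototype = array[kHeaderSize + kProtoOffset + i * kStep]; |
    void* cached_map = array[kHeaderSize + kMapOffset + i * kStep]; |
    if (IsLive(prototype) && IsLive(cached_map)) { |
      if (new_count != i) {  // Slide a live entry down over dead ones. |
        array[kHeaderSize + kProtoOffset + new_count * kStep] = prototype; |
        array[kHeaderSize + kMapOffset + new_count * kStep] = cached_map; |
      } |
      new_count++; |
    } |
  } |
  // Overwrite the now-unused tail so no stale pointers survive. |
  for (int i = new_count * kStep; i < number_of_transitions * kStep; i++) { |
    array[kHeaderSize + i] = filler; |
  } |
  return new_count; |
} |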
@@ -1783,13 +2274,13 @@ |
void MarkCompactCollector::ProcessWeakMaps() { |
Object* weak_map_obj = encountered_weak_maps(); |
while (weak_map_obj != Smi::FromInt(0)) { |
- ASSERT(HeapObject::cast(weak_map_obj)->IsMarked()); |
+ ASSERT(MarkCompactCollector::IsMarked(HeapObject::cast(weak_map_obj))); |
JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(weak_map_obj); |
ObjectHashTable* table = weak_map->unchecked_table(); |
for (int i = 0; i < table->Capacity(); i++) { |
- if (HeapObject::cast(table->KeyAt(i))->IsMarked()) { |
+ if (MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) { |
Object* value = table->get(table->EntryToValueIndex(i)); |
- StaticMarkingVisitor::MarkObjectByPointer(heap(), &value); |
+ StaticMarkingVisitor::VisitPointer(heap(), &value); |
table->set_unchecked(heap(), |
table->EntryToValueIndex(i), |
value, |
@@ -1804,11 +2295,11 @@ |
void MarkCompactCollector::ClearWeakMaps() { |
Object* weak_map_obj = encountered_weak_maps(); |
while (weak_map_obj != Smi::FromInt(0)) { |
- ASSERT(HeapObject::cast(weak_map_obj)->IsMarked()); |
+ ASSERT(MarkCompactCollector::IsMarked(HeapObject::cast(weak_map_obj))); |
JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(weak_map_obj); |
ObjectHashTable* table = weak_map->unchecked_table(); |
for (int i = 0; i < table->Capacity(); i++) { |
- if (!HeapObject::cast(table->KeyAt(i))->IsMarked()) { |
+ if (!MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) { |
table->RemoveEntry(i, heap()); |
} |
} |
@@ -1818,316 +2309,89 @@ |
set_encountered_weak_maps(Smi::FromInt(0)); |
} |
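 |
ProcessWeakMaps and ClearWeakMaps above implement the usual ephemeron |
rule: a value is kept alive only while its key is alive, and entries with |
dead keys are dropped once marking has reached a fixed point. A minimal |
sketch over a flat key/value table; IsMarked and MarkTransitively are |
stand-ins for the collector's primitives. |
 |
// One {key, value} pair per entry; a null key means "empty slot". |
struct WeakTable { |
  void** keys; |
  void** values; |
  int capacity; |
}; |
 |
// Cf. ProcessWeakMaps: values of live keys are ordinary strong references |
// and must be marked through. |
void ProcessWeakTable(WeakTable* table, |
                      bool (*IsMarked)(void*), |
                      void (*MarkTransitively)(void**)) { |
  for (int i = 0; i < table->capacity; i++) { |
    if (table->keys[i] != nullptr && IsMarked(table->keys[i])) { |
      MarkTransitively(&table->values[i]); |
    } |
  } |
} |
 |
// Cf. ClearWeakMaps, run after marking reaches a fixed point: entries |
// whose keys are still unmarked are dead and can be dropped. |
void ClearWeakTable(WeakTable* table, bool (*IsMarked)(void*)) { |
  for (int i = 0; i < table->capacity; i++) { |
    if (table->keys[i] != nullptr && !IsMarked(table->keys[i])) { |
      table->keys[i] = nullptr;  // RemoveEntry() in the real code. |
      table->values[i] = nullptr; |
    } |
  } |
} |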
-// ------------------------------------------------------------------------- |
-// Phase 2: Encode forwarding addresses. |
-// When compacting, forwarding addresses for objects in old space and map |
-// space are encoded in their map pointer word (along with an encoding of |
-// their map pointers). |
+ |
+// We scavenge new space simultaneously with sweeping. This is done in two |
+// passes. |
// |
-// The excact encoding is described in the comments for class MapWord in |
-// objects.h. |
+// The first pass migrates all live objects from one semispace to another |
+// or promotes them to old space. The forwarding address is written |
+// directly into the first word of the object without any encoding. If the |
+// object is dead, we write NULL as its forwarding address. |
// |
-// An address range [start, end) can have both live and non-live objects. |
-// Maximal non-live regions are marked so they can be skipped on subsequent |
-// sweeps of the heap. A distinguished map-pointer encoding is used to mark |
-// free regions of one-word size (in which case the next word is the start |
-// of a live object). A second distinguished map-pointer encoding is used |
-// to mark free regions larger than one word, and the size of the free |
-// region (including the first word) is written to the second word of the |
-// region. |
-// |
-// Any valid map page offset must lie in the object area of the page, so map |
-// page offsets less than Page::kObjectStartOffset are invalid. We use a |
-// pair of distinguished invalid map encodings (for single word and multiple |
-// words) to indicate free regions in the page found during computation of |
-// forwarding addresses and skipped over in subsequent sweeps. |
+// The second pass updates pointers to new space in all spaces. It is |
+// possible to encounter pointers to dead new space objects during the |
+// traversal of pointers to new space. We should clear them to avoid |
+// encountering them during the next pointer iteration. This is an issue if |
+// the store buffer overflows and we have to scan the entire old space, |
+// including dead objects, looking for pointers to new space. |
+void MarkCompactCollector::MigrateObject(Address dst, |
+ Address src, |
+ int size, |
+ AllocationSpace dest) { |
+ if (dest == OLD_POINTER_SPACE || dest == LO_SPACE) { |
+ Address src_slot = src; |
+ Address dst_slot = dst; |
+ ASSERT(IsAligned(size, kPointerSize)); |
+ for (int remaining = size / kPointerSize; remaining > 0; remaining--) { |
+ Object* value = Memory::Object_at(src_slot); |
-// Encode a free region, defined by the given start address and size, in the |
-// first word or two of the region. |
-void EncodeFreeRegion(Address free_start, int free_size) { |
- ASSERT(free_size >= kIntSize); |
- if (free_size == kIntSize) { |
- Memory::uint32_at(free_start) = MarkCompactCollector::kSingleFreeEncoding; |
- } else { |
- ASSERT(free_size >= 2 * kIntSize); |
- Memory::uint32_at(free_start) = MarkCompactCollector::kMultiFreeEncoding; |
- Memory::int_at(free_start + kIntSize) = free_size; |
- } |
+ Memory::Object_at(dst_slot) = value; |
-#ifdef DEBUG |
- // Zap the body of the free region. |
- if (FLAG_enable_slow_asserts) { |
- for (int offset = 2 * kIntSize; |
- offset < free_size; |
- offset += kPointerSize) { |
- Memory::Address_at(free_start + offset) = kZapValue; |
+ if (heap_->InNewSpace(value)) { |
+ heap_->store_buffer()->Mark(dst_slot); |
+ } else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) { |
+ SlotsBuffer::AddTo(&slots_buffer_allocator_, |
+ &migration_slots_buffer_, |
+ reinterpret_cast<Object**>(dst_slot), |
+ SlotsBuffer::IGNORE_OVERFLOW); |
+ } |
+ |
+ src_slot += kPointerSize; |
+ dst_slot += kPointerSize; |
} |
- } |
-#endif |
-} |
+ if (compacting_ && HeapObject::FromAddress(dst)->IsJSFunction()) { |
+ Address code_entry_slot = dst + JSFunction::kCodeEntryOffset; |
+ Address code_entry = Memory::Address_at(code_entry_slot); |
-// Try to promote all objects in new space. Heap numbers and sequential |
-// strings are promoted to the code space, large objects to large object space, |
-// and all others to the old space. |
-inline MaybeObject* MCAllocateFromNewSpace(Heap* heap, |
- HeapObject* object, |
- int object_size) { |
- MaybeObject* forwarded; |
- if (object_size > heap->MaxObjectSizeInPagedSpace()) { |
- forwarded = Failure::Exception(); |
- } else { |
- OldSpace* target_space = heap->TargetSpace(object); |
- ASSERT(target_space == heap->old_pointer_space() || |
- target_space == heap->old_data_space()); |
- forwarded = target_space->MCAllocateRaw(object_size); |
- } |
- Object* result; |
- if (!forwarded->ToObject(&result)) { |
- result = heap->new_space()->MCAllocateRaw(object_size)->ToObjectUnchecked(); |
- } |
- return result; |
-} |
- |
- |
-// Allocation functions for the paged spaces call the space's MCAllocateRaw. |
-MUST_USE_RESULT inline MaybeObject* MCAllocateFromOldPointerSpace( |
- Heap *heap, |
- HeapObject* ignore, |
- int object_size) { |
- return heap->old_pointer_space()->MCAllocateRaw(object_size); |
-} |
- |
- |
-MUST_USE_RESULT inline MaybeObject* MCAllocateFromOldDataSpace( |
- Heap* heap, |
- HeapObject* ignore, |
- int object_size) { |
- return heap->old_data_space()->MCAllocateRaw(object_size); |
-} |
- |
- |
-MUST_USE_RESULT inline MaybeObject* MCAllocateFromCodeSpace( |
- Heap* heap, |
- HeapObject* ignore, |
- int object_size) { |
- return heap->code_space()->MCAllocateRaw(object_size); |
-} |
- |
- |
-MUST_USE_RESULT inline MaybeObject* MCAllocateFromMapSpace( |
- Heap* heap, |
- HeapObject* ignore, |
- int object_size) { |
- return heap->map_space()->MCAllocateRaw(object_size); |
-} |
- |
- |
-MUST_USE_RESULT inline MaybeObject* MCAllocateFromCellSpace( |
- Heap* heap, HeapObject* ignore, int object_size) { |
- return heap->cell_space()->MCAllocateRaw(object_size); |
-} |
- |
- |
-// The forwarding address is encoded at the same offset as the current |
-// to-space object, but in from space. |
-inline void EncodeForwardingAddressInNewSpace(Heap* heap, |
- HeapObject* old_object, |
- int object_size, |
- Object* new_object, |
- int* ignored) { |
- int offset = |
- heap->new_space()->ToSpaceOffsetForAddress(old_object->address()); |
- Memory::Address_at(heap->new_space()->FromSpaceLow() + offset) = |
- HeapObject::cast(new_object)->address(); |
-} |
- |
- |
-// The forwarding address is encoded in the map pointer of the object as an |
-// offset (in terms of live bytes) from the address of the first live object |
-// in the page. |
-inline void EncodeForwardingAddressInPagedSpace(Heap* heap, |
- HeapObject* old_object, |
- int object_size, |
- Object* new_object, |
- int* offset) { |
- // Record the forwarding address of the first live object if necessary. |
- if (*offset == 0) { |
- Page::FromAddress(old_object->address())->mc_first_forwarded = |
- HeapObject::cast(new_object)->address(); |
- } |
- |
- MapWord encoding = |
- MapWord::EncodeAddress(old_object->map()->address(), *offset); |
- old_object->set_map_word(encoding); |
- *offset += object_size; |
- ASSERT(*offset <= Page::kObjectAreaSize); |
-} |
- |
- |
-// Most non-live objects are ignored. |
-inline void IgnoreNonLiveObject(HeapObject* object, Isolate* isolate) {} |
- |
- |
-// Function template that, given a range of addresses (eg, a semispace or a |
-// paged space page), iterates through the objects in the range to clear |
-// mark bits and compute and encode forwarding addresses. As a side effect, |
-// maximal free chunks are marked so that they can be skipped on subsequent |
-// sweeps. |
-// |
-// The template parameters are an allocation function, a forwarding address |
-// encoding function, and a function to process non-live objects. |
-template<MarkCompactCollector::AllocationFunction Alloc, |
- MarkCompactCollector::EncodingFunction Encode, |
- MarkCompactCollector::ProcessNonLiveFunction ProcessNonLive> |
-inline void EncodeForwardingAddressesInRange(MarkCompactCollector* collector, |
- Address start, |
- Address end, |
- int* offset) { |
- // The start address of the current free region while sweeping the space. |
- // This address is set when a transition from live to non-live objects is |
- // encountered. A value (an encoding of the 'next free region' pointer) |
- // is written to memory at this address when a transition from non-live to |
- // live objects is encountered. |
- Address free_start = NULL; |
- |
- // A flag giving the state of the previously swept object. Initially true |
- // to ensure that free_start is initialized to a proper address before |
- // trying to write to it. |
- bool is_prev_alive = true; |
- |
- int object_size; // Will be set on each iteration of the loop. |
- for (Address current = start; current < end; current += object_size) { |
- HeapObject* object = HeapObject::FromAddress(current); |
- if (object->IsMarked()) { |
- object->ClearMark(); |
- collector->tracer()->decrement_marked_count(); |
- object_size = object->Size(); |
- |
- Object* forwarded = |
- Alloc(collector->heap(), object, object_size)->ToObjectUnchecked(); |
- Encode(collector->heap(), object, object_size, forwarded, offset); |
- |
-#ifdef DEBUG |
- if (FLAG_gc_verbose) { |
- PrintF("forward %p -> %p.\n", object->address(), |
- HeapObject::cast(forwarded)->address()); |
+ if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) { |
+ SlotsBuffer::AddTo(&slots_buffer_allocator_, |
+ &migration_slots_buffer_, |
+ SlotsBuffer::CODE_ENTRY_SLOT, |
+ code_entry_slot, |
+ SlotsBuffer::IGNORE_OVERFLOW); |
} |
-#endif |
- if (!is_prev_alive) { // Transition from non-live to live. |
- EncodeFreeRegion(free_start, static_cast<int>(current - free_start)); |
- is_prev_alive = true; |
- } |
- } else { // Non-live object. |
- object_size = object->Size(); |
- ProcessNonLive(object, collector->heap()->isolate()); |
- if (is_prev_alive) { // Transition from live to non-live. |
- free_start = current; |
- is_prev_alive = false; |
- } |
- LiveObjectList::ProcessNonLive(object); |
} |
- } |
- |
- // If we ended on a free region, mark it. |
- if (!is_prev_alive) { |
- EncodeFreeRegion(free_start, static_cast<int>(end - free_start)); |
- } |
-} |
- |
- |
-// Functions to encode the forwarding pointers in each compactable space. |
-void MarkCompactCollector::EncodeForwardingAddressesInNewSpace() { |
- int ignored; |
- EncodeForwardingAddressesInRange<MCAllocateFromNewSpace, |
- EncodeForwardingAddressInNewSpace, |
- IgnoreNonLiveObject>( |
- this, |
- heap()->new_space()->bottom(), |
- heap()->new_space()->top(), |
- &ignored); |
-} |
- |
- |
-template<MarkCompactCollector::AllocationFunction Alloc, |
- MarkCompactCollector::ProcessNonLiveFunction ProcessNonLive> |
-void MarkCompactCollector::EncodeForwardingAddressesInPagedSpace( |
- PagedSpace* space) { |
- PageIterator it(space, PageIterator::PAGES_IN_USE); |
- while (it.has_next()) { |
- Page* p = it.next(); |
- |
- // The offset of each live object in the page from the first live object |
- // in the page. |
- int offset = 0; |
- EncodeForwardingAddressesInRange<Alloc, |
- EncodeForwardingAddressInPagedSpace, |
- ProcessNonLive>( |
- this, |
- p->ObjectAreaStart(), |
- p->AllocationTop(), |
- &offset); |
- } |
-} |
- |
- |
-// We scavange new space simultaneously with sweeping. This is done in two |
-// passes. |
-// The first pass migrates all alive objects from one semispace to another or |
-// promotes them to old space. Forwading address is written directly into |
-// first word of object without any encoding. If object is dead we are writing |
-// NULL as a forwarding address. |
-// The second pass updates pointers to new space in all spaces. It is possible |
-// to encounter pointers to dead objects during traversal of dirty regions we |
-// should clear them to avoid encountering them during next dirty regions |
-// iteration. |
-static void MigrateObject(Heap* heap, |
- Address dst, |
- Address src, |
- int size, |
- bool to_old_space) { |
- if (to_old_space) { |
- heap->CopyBlockToOldSpaceAndUpdateRegionMarks(dst, src, size); |
+ } else if (dest == CODE_SPACE) { |
+ PROFILE(heap()->isolate(), CodeMoveEvent(src, dst)); |
+ heap()->MoveBlock(dst, src, size); |
+ SlotsBuffer::AddTo(&slots_buffer_allocator_, |
+ &migration_slots_buffer_, |
+ SlotsBuffer::RELOCATED_CODE_OBJECT, |
+ dst, |
+ SlotsBuffer::IGNORE_OVERFLOW); |
+ Code::cast(HeapObject::FromAddress(dst))->Relocate(dst - src); |
} else { |
- heap->CopyBlock(dst, src, size); |
+ ASSERT(dest == OLD_DATA_SPACE || dest == NEW_SPACE); |
+ heap()->MoveBlock(dst, src, size); |
} |
- |
Memory::Address_at(src) = dst; |
} |
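 |
For new-space objects the forwarding protocol is deliberately simple: the |
destination address is stored unencoded in the first word of the old copy |
(the Memory::Address_at(src) = dst above), and dead objects get NULL there |
instead. A minimal sketch of both sides of the protocol, with raw byte |
ranges standing in for the semispaces. |
 |
#include <cstdint> |
#include <cstring> |
 |
// Pass 1: copy the payload to the new location and leave the destination |
// address in the first word of the old copy. |
void ForwardLiveObject(uint8_t* src, uint8_t* dst, size_t size) { |
  std::memcpy(dst, src, size); |
  *reinterpret_cast<uint8_t**>(src) = dst;  // Unencoded forwarding address. |
} |
 |
// Dead objects are marked with a NULL forwarding address instead. |
void MarkDeadObject(uint8_t* src) { |
  *reinterpret_cast<uint8_t**>(src) = nullptr; |
} |
 |
// Pass 2: a slot pointing into from-space is redirected through the |
// forwarding word; NULL there means the referent died, so the slot is |
// cleared (zapped) rather than left dangling. |
void UpdateFromSpaceSlot(uint8_t** slot) { |
  *slot = *reinterpret_cast<uint8_t**>(*slot); |
} |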
-class StaticPointersToNewGenUpdatingVisitor : public |
- StaticNewSpaceVisitor<StaticPointersToNewGenUpdatingVisitor> { |
- public: |
- static inline void VisitPointer(Heap* heap, Object** p) { |
- if (!(*p)->IsHeapObject()) return; |
- |
- HeapObject* obj = HeapObject::cast(*p); |
- Address old_addr = obj->address(); |
- |
- if (heap->new_space()->Contains(obj)) { |
- ASSERT(heap->InFromSpace(*p)); |
- *p = HeapObject::FromAddress(Memory::Address_at(old_addr)); |
- } |
- } |
-}; |
- |
- |
// Visitor for updating pointers from live objects in old spaces to new space. |
// It does not expect to encounter pointers to dead objects. |
-class PointersToNewGenUpdatingVisitor: public ObjectVisitor { |
+class PointersUpdatingVisitor: public ObjectVisitor { |
public: |
- explicit PointersToNewGenUpdatingVisitor(Heap* heap) : heap_(heap) { } |
+ explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) { } |
void VisitPointer(Object** p) { |
- StaticPointersToNewGenUpdatingVisitor::VisitPointer(heap_, p); |
+ UpdatePointer(p); |
} |
void VisitPointers(Object** start, Object** end) { |
- for (Object** p = start; p < end; p++) { |
- StaticPointersToNewGenUpdatingVisitor::VisitPointer(heap_, p); |
- } |
+ for (Object** p = start; p < end; p++) UpdatePointer(p); |
} |
void VisitCodeTarget(RelocInfo* rinfo) { |
@@ -2148,67 +2412,86 @@ |
} |
private: |
+ inline void UpdatePointer(Object** p) { |
+ if (!(*p)->IsHeapObject()) return; |
+ |
+ HeapObject* obj = HeapObject::cast(*p); |
+ |
+ if (heap_->InNewSpace(obj) || |
+ MarkCompactCollector::IsOnEvacuationCandidate(obj)) { |
+ ASSERT(obj->map_word().IsForwardingAddress()); |
+ *p = obj->map_word().ToForwardingAddress(); |
+ ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(*p)); |
+ } |
+ } |
+ |
Heap* heap_; |
}; |
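 |
Objects evacuated from old-space candidate pages are found through their |
map word instead: evacuation overwrites the map slot with a value that |
MapWord::IsForwardingAddress() recognizes. The real encoding lives in |
objects.h; the sketch below assumes a hypothetical low-bit tag purely for |
illustration. |
 |
#include <cstdint> |
 |
// Hypothetical map-word model: a set low bit means "forwarding address". |
struct ModelMapWord { |
  uintptr_t value; |
 |
  static ModelMapWord FromForwardingAddress(void* target) { |
    return ModelMapWord{reinterpret_cast<uintptr_t>(target) | 1u}; |
  } |
  bool IsForwardingAddress() const { return (value & 1u) != 0; } |
  void* ToForwardingAddress() const { |
    return reinterpret_cast<void*>(value & ~uintptr_t{1}); |
  } |
}; |
 |
// Slot update mirroring UpdatePointer above: only slots whose referent |
// has been moved carry a forwarding map word. |
void UpdateEvacuatedSlot(void** slot, ModelMapWord (*MapWordOf)(void*)) { |
  ModelMapWord word = MapWordOf(*slot); |
  if (word.IsForwardingAddress()) *slot = word.ToForwardingAddress(); |
} |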
-// Visitor for updating pointers from live objects in old spaces to new space. |
-// It can encounter pointers to dead objects in new space when traversing map |
-// space (see comment for MigrateObject). |
-static void UpdatePointerToNewGen(HeapObject** p) { |
- if (!(*p)->IsHeapObject()) return; |
+static void UpdatePointer(HeapObject** p, HeapObject* object) { |
+ ASSERT(*p == object); |
- Address old_addr = (*p)->address(); |
- ASSERT(HEAP->InFromSpace(*p)); |
+ Address old_addr = object->address(); |
Address new_addr = Memory::Address_at(old_addr); |
- if (new_addr == NULL) { |
- // We encountered pointer to a dead object. Clear it so we will |
- // not visit it again during next iteration of dirty regions. |
- *p = NULL; |
+  // The new space sweep will overwrite the map word of dead objects |
+  // with NULL. In this case we do not need to transfer this entry to |
+  // the store buffer, which we are rebuilding. |
+ if (new_addr != NULL) { |
+ *p = HeapObject::FromAddress(new_addr); |
} else { |
- *p = HeapObject::FromAddress(new_addr); |
+    // We have to zap this pointer, because the store buffer may overflow |
+    // later, and then we would have to scan the entire heap and we don't |
+    // want to find spurious new space pointers in the old space. |
+ *p = reinterpret_cast<HeapObject*>(Smi::FromInt(0)); |
} |
} |
-static String* UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap, |
- Object** p) { |
- Address old_addr = HeapObject::cast(*p)->address(); |
- Address new_addr = Memory::Address_at(old_addr); |
- return String::cast(HeapObject::FromAddress(new_addr)); |
+static String* UpdateReferenceInExternalStringTableEntry(Heap* heap, |
+ Object** p) { |
+ MapWord map_word = HeapObject::cast(*p)->map_word(); |
+ |
+ if (map_word.IsForwardingAddress()) { |
+ return String::cast(map_word.ToForwardingAddress()); |
+ } |
+ |
+ return String::cast(*p); |
} |
-static bool TryPromoteObject(Heap* heap, HeapObject* object, int object_size) { |
+bool MarkCompactCollector::TryPromoteObject(HeapObject* object, |
+ int object_size) { |
Object* result; |
- if (object_size > heap->MaxObjectSizeInPagedSpace()) { |
+ if (object_size > heap()->MaxObjectSizeInPagedSpace()) { |
MaybeObject* maybe_result = |
- heap->lo_space()->AllocateRawFixedArray(object_size); |
+ heap()->lo_space()->AllocateRaw(object_size, NOT_EXECUTABLE); |
if (maybe_result->ToObject(&result)) { |
HeapObject* target = HeapObject::cast(result); |
- MigrateObject(heap, target->address(), object->address(), object_size, |
- true); |
- heap->mark_compact_collector()->tracer()-> |
+ MigrateObject(target->address(), |
+ object->address(), |
+ object_size, |
+ LO_SPACE); |
+ heap()->mark_compact_collector()->tracer()-> |
increment_promoted_objects_size(object_size); |
return true; |
} |
} else { |
- OldSpace* target_space = heap->TargetSpace(object); |
+ OldSpace* target_space = heap()->TargetSpace(object); |
- ASSERT(target_space == heap->old_pointer_space() || |
- target_space == heap->old_data_space()); |
+ ASSERT(target_space == heap()->old_pointer_space() || |
+ target_space == heap()->old_data_space()); |
MaybeObject* maybe_result = target_space->AllocateRaw(object_size); |
if (maybe_result->ToObject(&result)) { |
HeapObject* target = HeapObject::cast(result); |
- MigrateObject(heap, |
- target->address(), |
+ MigrateObject(target->address(), |
object->address(), |
object_size, |
- target_space == heap->old_pointer_space()); |
- heap->mark_compact_collector()->tracer()-> |
+ target_space->identity()); |
+ heap()->mark_compact_collector()->tracer()-> |
increment_promoted_objects_size(object_size); |
return true; |
} |
@@ -2218,1110 +2501,981 @@ |
} |
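 |
The promotion policy above is purely size-driven: anything larger than |
MaxObjectSizeInPagedSpace() goes to the large object space, everything |
else to the old space picked by Heap::TargetSpace() (pointer vs. data). A |
small sketch of that decision with the V8-specific queries reduced to |
parameters; the names here are illustrative only. |
 |
enum PromotionTarget { OLD_POINTER, OLD_DATA, LARGE_OBJECT }; |
 |
// max_paged_size plays the role of Heap::MaxObjectSizeInPagedSpace(), |
// has_pointers the pointer/data distinction made by Heap::TargetSpace(). |
PromotionTarget ChoosePromotionTarget(int object_size, |
                                      int max_paged_size, |
                                      bool has_pointers) { |
  if (object_size > max_paged_size) return LARGE_OBJECT; |
  return has_pointers ? OLD_POINTER : OLD_DATA; |
} |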
-static void SweepNewSpace(Heap* heap, NewSpace* space) { |
- heap->CheckNewSpaceExpansionCriteria(); |
+void MarkCompactCollector::EvacuateNewSpace() { |
+ heap()->CheckNewSpaceExpansionCriteria(); |
- Address from_bottom = space->bottom(); |
- Address from_top = space->top(); |
+ NewSpace* new_space = heap()->new_space(); |
+ // Store allocation range before flipping semispaces. |
+ Address from_bottom = new_space->bottom(); |
+ Address from_top = new_space->top(); |
+ |
// Flip the semispaces. After flipping, to space is empty, from space has |
// live objects. |
- space->Flip(); |
- space->ResetAllocationInfo(); |
+ new_space->Flip(); |
+ new_space->ResetAllocationInfo(); |
- int size = 0; |
int survivors_size = 0; |
// First pass: traverse all objects in inactive semispace, remove marks, |
- // migrate live objects and write forwarding addresses. |
- for (Address current = from_bottom; current < from_top; current += size) { |
- HeapObject* object = HeapObject::FromAddress(current); |
- |
- if (object->IsMarked()) { |
- object->ClearMark(); |
- heap->mark_compact_collector()->tracer()->decrement_marked_count(); |
- |
- size = object->Size(); |
+ // migrate live objects and write forwarding addresses. This stage puts |
+ // new entries in the store buffer and may cause some pages to be marked |
+ // scan-on-scavenge. |
+ SemiSpaceIterator from_it(from_bottom, from_top); |
+ for (HeapObject* object = from_it.Next(); |
+ object != NULL; |
+ object = from_it.Next()) { |
+ MarkBit mark_bit = Marking::MarkBitFrom(object); |
+ if (mark_bit.Get()) { |
+ mark_bit.Clear(); |
+ int size = object->Size(); |
survivors_size += size; |
+ MemoryChunk::IncrementLiveBytes(object->address(), -size); |
// Aggressively promote young survivors to the old space. |
- if (TryPromoteObject(heap, object, size)) { |
+ if (TryPromoteObject(object, size)) { |
continue; |
} |
// Promotion failed. Just migrate object to another semispace. |
- // Allocation cannot fail at this point: semispaces are of equal size. |
- Object* target = space->AllocateRaw(size)->ToObjectUnchecked(); |
+ MaybeObject* allocation = new_space->AllocateRaw(size); |
+ if (allocation->IsFailure()) { |
+ if (!new_space->AddFreshPage()) { |
+ // Shouldn't happen. We are sweeping linearly, and to-space |
+ // has the same number of pages as from-space, so there is |
+ // always room. |
+ UNREACHABLE(); |
+ } |
+ allocation = new_space->AllocateRaw(size); |
+ ASSERT(!allocation->IsFailure()); |
+ } |
+ Object* target = allocation->ToObjectUnchecked(); |
- MigrateObject(heap, |
- HeapObject::cast(target)->address(), |
- current, |
+ MigrateObject(HeapObject::cast(target)->address(), |
+ object->address(), |
size, |
- false); |
+ NEW_SPACE); |
} else { |
// Process the dead object before we write a NULL into its header. |
LiveObjectList::ProcessNonLive(object); |
- size = object->Size(); |
- Memory::Address_at(current) = NULL; |
+ // Mark dead objects in the new space with null in their map field. |
+ Memory::Address_at(object->address()) = NULL; |
} |
} |
- // Second pass: find pointers to new space and update them. |
- PointersToNewGenUpdatingVisitor updating_visitor(heap); |
- |
- // Update pointers in to space. |
- Address current = space->bottom(); |
- while (current < space->top()) { |
- HeapObject* object = HeapObject::FromAddress(current); |
- current += |
- StaticPointersToNewGenUpdatingVisitor::IterateBody(object->map(), |
- object); |
- } |
- |
- // Update roots. |
- heap->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE); |
- LiveObjectList::IterateElements(&updating_visitor); |
- |
- // Update pointers in old spaces. |
- heap->IterateDirtyRegions(heap->old_pointer_space(), |
- &Heap::IteratePointersInDirtyRegion, |
- &UpdatePointerToNewGen, |
- heap->WATERMARK_SHOULD_BE_VALID); |
- |
- heap->lo_space()->IterateDirtyRegions(&UpdatePointerToNewGen); |
- |
- // Update pointers from cells. |
- HeapObjectIterator cell_iterator(heap->cell_space()); |
- for (HeapObject* cell = cell_iterator.next(); |
- cell != NULL; |
- cell = cell_iterator.next()) { |
- if (cell->IsJSGlobalPropertyCell()) { |
- Address value_address = |
- reinterpret_cast<Address>(cell) + |
- (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag); |
- updating_visitor.VisitPointer(reinterpret_cast<Object**>(value_address)); |
- } |
- } |
- |
- // Update pointer from the global contexts list. |
- updating_visitor.VisitPointer(heap->global_contexts_list_address()); |
- |
- // Update pointers from external string table. |
- heap->UpdateNewSpaceReferencesInExternalStringTable( |
- &UpdateNewSpaceReferenceInExternalStringTableEntry); |
- |
- // All pointers were updated. Update auxiliary allocation info. |
- heap->IncrementYoungSurvivorsCounter(survivors_size); |
- space->set_age_mark(space->top()); |
- |
- // Update JSFunction pointers from the runtime profiler. |
- heap->isolate()->runtime_profiler()->UpdateSamplesAfterScavenge(); |
+ heap_->IncrementYoungSurvivorsCounter(survivors_size); |
+ new_space->set_age_mark(new_space->top()); |
} |
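 |
The allocation retry inside the loop above relies on to-space now being |
paged: a first failure can only mean the current page is exhausted, so |
after AddFreshPage() one retry must succeed. A sketch of that shape, |
assuming a hypothetical space type whose AllocateRaw() returns null on |
failure. |
 |
// To-space has as many pages as from-space and sweeping is linear, so |
// the second attempt cannot fail (cf. the UNREACHABLE() above). |
template <typename Space> |
void* AllocateInToSpace(Space* to_space, int size) { |
  void* result = to_space->AllocateRaw(size); |
  if (result == nullptr) { |
    bool added = to_space->AddFreshPage();  // Advance to the next page. |
    (void)added;  // Always true under the invariant above. |
    result = to_space->AllocateRaw(size); |
  } |
  return result; |
} |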
-static void SweepSpace(Heap* heap, PagedSpace* space) { |
- PageIterator it(space, PageIterator::PAGES_IN_USE); |
+void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) { |
+ AlwaysAllocateScope always_allocate; |
+ PagedSpace* space = static_cast<PagedSpace*>(p->owner()); |
+ ASSERT(p->IsEvacuationCandidate() && !p->WasSwept()); |
+ MarkBit::CellType* cells = p->markbits()->cells(); |
+ p->MarkSweptPrecisely(); |
- // During sweeping of paged space we are trying to find longest sequences |
- // of pages without live objects and free them (instead of putting them on |
- // the free list). |
+ int last_cell_index = |
+ Bitmap::IndexToCell( |
+ Bitmap::CellAlignIndex( |
+ p->AddressToMarkbitIndex(p->ObjectAreaEnd()))); |
- // Page preceding current. |
- Page* prev = Page::FromAddress(NULL); |
+ int cell_index = Page::kFirstUsedCell; |
+ Address cell_base = p->ObjectAreaStart(); |
+ int offsets[16]; |
- // First empty page in a sequence. |
- Page* first_empty_page = Page::FromAddress(NULL); |
+ for (cell_index = Page::kFirstUsedCell; |
+ cell_index < last_cell_index; |
+ cell_index++, cell_base += 32 * kPointerSize) { |
+ ASSERT((unsigned)cell_index == |
+ Bitmap::IndexToCell( |
+ Bitmap::CellAlignIndex( |
+ p->AddressToMarkbitIndex(cell_base)))); |
+ if (cells[cell_index] == 0) continue; |
- // Page preceding first empty page. |
- Page* prec_first_empty_page = Page::FromAddress(NULL); |
+ int live_objects = MarkWordToObjectStarts(cells[cell_index], offsets); |
+ for (int i = 0; i < live_objects; i++) { |
+ Address object_addr = cell_base + offsets[i] * kPointerSize; |
+ HeapObject* object = HeapObject::FromAddress(object_addr); |
+ ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object))); |
- // If last used page of space ends with a sequence of dead objects |
- // we can adjust allocation top instead of puting this free area into |
- // the free list. Thus during sweeping we keep track of such areas |
- // and defer their deallocation until the sweeping of the next page |
- // is done: if one of the next pages contains live objects we have |
- // to put such area into the free list. |
- Address last_free_start = NULL; |
- int last_free_size = 0; |
+ int size = object->Size(); |
- while (it.has_next()) { |
- Page* p = it.next(); |
+ MaybeObject* target = space->AllocateRaw(size); |
+ if (target->IsFailure()) { |
+ // OS refused to give us memory. |
+ V8::FatalProcessOutOfMemory("Evacuation"); |
+ return; |
+ } |
- bool is_previous_alive = true; |
- Address free_start = NULL; |
- HeapObject* object; |
+ Object* target_object = target->ToObjectUnchecked(); |
- for (Address current = p->ObjectAreaStart(); |
- current < p->AllocationTop(); |
- current += object->Size()) { |
- object = HeapObject::FromAddress(current); |
- if (object->IsMarked()) { |
- object->ClearMark(); |
- heap->mark_compact_collector()->tracer()->decrement_marked_count(); |
- |
- if (!is_previous_alive) { // Transition from free to live. |
- space->DeallocateBlock(free_start, |
- static_cast<int>(current - free_start), |
- true); |
- is_previous_alive = true; |
- } |
- } else { |
- heap->mark_compact_collector()->ReportDeleteIfNeeded( |
- object, heap->isolate()); |
- if (is_previous_alive) { // Transition from live to free. |
- free_start = current; |
- is_previous_alive = false; |
- } |
- LiveObjectList::ProcessNonLive(object); |
- } |
- // The object is now unmarked for the call to Size() at the top of the |
- // loop. |
+ MigrateObject(HeapObject::cast(target_object)->address(), |
+ object_addr, |
+ size, |
+ space->identity()); |
+ ASSERT(object->map_word().IsForwardingAddress()); |
} |
- bool page_is_empty = (p->ObjectAreaStart() == p->AllocationTop()) |
- || (!is_previous_alive && free_start == p->ObjectAreaStart()); |
+ // Clear marking bits for current cell. |
+ cells[cell_index] = 0; |
+ } |
+} |
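 |
Each 32-bit mark-bit cell covers 32 consecutive words, which is why |
cell_base advances by 32 * kPointerSize per cell and why 16 offset slots |
suffice (with a minimum object size of two words, at most 16 objects can |
start inside one cell). A straightforward, if unoptimized, stand-in for |
MarkWordToObjectStarts: |
 |
#include <cstdint> |
 |
// Decodes a 32-bit mark-bit cell into word offsets of object starts and |
// returns how many were found; `offsets` must hold 16 entries. |
int CellToObjectStarts(uint32_t cell, int* offsets) { |
  int count = 0; |
  for (int bit = 0; bit < 32; bit++) { |
    if (cell & (uint32_t{1} << bit)) offsets[count++] = bit; |
  } |
  return count; |
} |
 |
// Usage mirroring the loop above: scale each offset by the word size and |
// add it to the cell's base address to recover the object address, e.g. |
//   Address addr = cell_base + offsets[i] * kPointerSize; |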
- if (page_is_empty) { |
- // This page is empty. Check whether we are in the middle of |
- // sequence of empty pages and start one if not. |
- if (!first_empty_page->is_valid()) { |
- first_empty_page = p; |
- prec_first_empty_page = prev; |
- } |
- if (!is_previous_alive) { |
- // There are dead objects on this page. Update space accounting stats |
- // without putting anything into free list. |
- int size_in_bytes = static_cast<int>(p->AllocationTop() - free_start); |
- if (size_in_bytes > 0) { |
- space->DeallocateBlock(free_start, size_in_bytes, false); |
+void MarkCompactCollector::EvacuatePages() { |
+ int npages = evacuation_candidates_.length(); |
+ for (int i = 0; i < npages; i++) { |
+ Page* p = evacuation_candidates_[i]; |
+ ASSERT(p->IsEvacuationCandidate() || |
+ p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); |
+ if (p->IsEvacuationCandidate()) { |
+ // During compaction we might have to request a new page. |
+ // Check that space still have room for that. |
+ if (static_cast<PagedSpace*>(p->owner())->CanExpand()) { |
+ EvacuateLiveObjectsFromPage(p); |
+ } else { |
+        // Without room for expansion, evacuation is not guaranteed to |
+        // succeed. Pessimistically abandon unevacuated pages. |
+ for (int j = i; j < npages; j++) { |
+ evacuation_candidates_[j]->ClearEvacuationCandidate(); |
+ evacuation_candidates_[j]->SetFlag(Page::RESCAN_ON_EVACUATION); |
} |
+ return; |
} |
- } else { |
- // This page is not empty. Sequence of empty pages ended on the previous |
- // one. |
- if (first_empty_page->is_valid()) { |
- space->FreePages(prec_first_empty_page, prev); |
- prec_first_empty_page = first_empty_page = Page::FromAddress(NULL); |
- } |
- |
- // If there is a free ending area on one of the previous pages we have |
- // deallocate that area and put it on the free list. |
- if (last_free_size > 0) { |
- Page::FromAddress(last_free_start)-> |
- SetAllocationWatermark(last_free_start); |
- space->DeallocateBlock(last_free_start, last_free_size, true); |
- last_free_start = NULL; |
- last_free_size = 0; |
- } |
- |
- // If the last region of this page was not live we remember it. |
- if (!is_previous_alive) { |
- ASSERT(last_free_size == 0); |
- last_free_size = static_cast<int>(p->AllocationTop() - free_start); |
- last_free_start = free_start; |
- } |
} |
- |
- prev = p; |
} |
- |
- // We reached end of space. See if we need to adjust allocation top. |
- Address new_allocation_top = NULL; |
- |
- if (first_empty_page->is_valid()) { |
- // Last used pages in space are empty. We can move allocation top backwards |
- // to the beginning of first empty page. |
- ASSERT(prev == space->AllocationTopPage()); |
- |
- new_allocation_top = first_empty_page->ObjectAreaStart(); |
- } |
- |
- if (last_free_size > 0) { |
- // There was a free ending area on the previous page. |
- // Deallocate it without putting it into freelist and move allocation |
- // top to the beginning of this free area. |
- space->DeallocateBlock(last_free_start, last_free_size, false); |
- new_allocation_top = last_free_start; |
- } |
- |
- if (new_allocation_top != NULL) { |
-#ifdef DEBUG |
- Page* new_allocation_top_page = Page::FromAllocationTop(new_allocation_top); |
- if (!first_empty_page->is_valid()) { |
- ASSERT(new_allocation_top_page == space->AllocationTopPage()); |
- } else if (last_free_size > 0) { |
- ASSERT(new_allocation_top_page == prec_first_empty_page); |
- } else { |
- ASSERT(new_allocation_top_page == first_empty_page); |
- } |
-#endif |
- |
- space->SetTop(new_allocation_top); |
- } |
} |
-void MarkCompactCollector::EncodeForwardingAddresses() { |
- ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES); |
- // Objects in the active semispace of the young generation may be |
- // relocated to the inactive semispace (if not promoted). Set the |
- // relocation info to the beginning of the inactive semispace. |
- heap()->new_space()->MCResetRelocationInfo(); |
- |
- // Compute the forwarding pointers in each space. |
- EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldPointerSpace, |
- ReportDeleteIfNeeded>( |
- heap()->old_pointer_space()); |
- |
- EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldDataSpace, |
- IgnoreNonLiveObject>( |
- heap()->old_data_space()); |
- |
- EncodeForwardingAddressesInPagedSpace<MCAllocateFromCodeSpace, |
- ReportDeleteIfNeeded>( |
- heap()->code_space()); |
- |
- EncodeForwardingAddressesInPagedSpace<MCAllocateFromCellSpace, |
- IgnoreNonLiveObject>( |
- heap()->cell_space()); |
- |
- |
- // Compute new space next to last after the old and code spaces have been |
- // compacted. Objects in new space can be promoted to old or code space. |
- EncodeForwardingAddressesInNewSpace(); |
- |
- // Compute map space last because computing forwarding addresses |
- // overwrites non-live objects. Objects in the other spaces rely on |
- // non-live map pointers to get the sizes of non-live objects. |
- EncodeForwardingAddressesInPagedSpace<MCAllocateFromMapSpace, |
- IgnoreNonLiveObject>( |
- heap()->map_space()); |
- |
- // Write relocation info to the top page, so we can use it later. This is |
- // done after promoting objects from the new space so we get the correct |
- // allocation top. |
- heap()->old_pointer_space()->MCWriteRelocationInfoToPage(); |
- heap()->old_data_space()->MCWriteRelocationInfoToPage(); |
- heap()->code_space()->MCWriteRelocationInfoToPage(); |
- heap()->map_space()->MCWriteRelocationInfoToPage(); |
- heap()->cell_space()->MCWriteRelocationInfoToPage(); |
-} |
- |
- |
-class MapIterator : public HeapObjectIterator { |
+class EvacuationWeakObjectRetainer : public WeakObjectRetainer { |
public: |
- explicit MapIterator(Heap* heap) |
- : HeapObjectIterator(heap->map_space(), &SizeCallback) { } |
- |
- MapIterator(Heap* heap, Address start) |
- : HeapObjectIterator(heap->map_space(), start, &SizeCallback) { } |
- |
- private: |
- static int SizeCallback(HeapObject* unused) { |
- USE(unused); |
- return Map::kSize; |
+ virtual Object* RetainAs(Object* object) { |
+ if (object->IsHeapObject()) { |
+ HeapObject* heap_object = HeapObject::cast(object); |
+ MapWord map_word = heap_object->map_word(); |
+ if (map_word.IsForwardingAddress()) { |
+ return map_word.ToForwardingAddress(); |
+ } |
+ } |
+ return object; |
} |
}; |
-class MapCompact { |
- public: |
- explicit MapCompact(Heap* heap, int live_maps) |
- : heap_(heap), |
- live_maps_(live_maps), |
- to_evacuate_start_(heap->map_space()->TopAfterCompaction(live_maps)), |
- vacant_map_it_(heap), |
- map_to_evacuate_it_(heap, to_evacuate_start_), |
- first_map_to_evacuate_( |
- reinterpret_cast<Map*>(HeapObject::FromAddress(to_evacuate_start_))) { |
- } |
+static inline void UpdateSlot(Object** slot) { |
+ Object* obj = *slot; |
+ if (!obj->IsHeapObject()) return; |
- void CompactMaps() { |
- // As we know the number of maps to evacuate beforehand, |
- // we stop then there is no more vacant maps. |
- for (Map* next_vacant_map = NextVacantMap(); |
- next_vacant_map; |
- next_vacant_map = NextVacantMap()) { |
- EvacuateMap(next_vacant_map, NextMapToEvacuate()); |
- } |
+ HeapObject* heap_obj = HeapObject::cast(obj); |
-#ifdef DEBUG |
- CheckNoMapsToEvacuate(); |
-#endif |
+ MapWord map_word = heap_obj->map_word(); |
+ if (map_word.IsForwardingAddress()) { |
+ ASSERT(MarkCompactCollector::IsOnEvacuationCandidate(*slot)); |
+ *slot = map_word.ToForwardingAddress(); |
+ ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(*slot)); |
} |
+} |
- void UpdateMapPointersInRoots() { |
- MapUpdatingVisitor map_updating_visitor; |
- heap()->IterateRoots(&map_updating_visitor, VISIT_ONLY_STRONG); |
- heap()->isolate()->global_handles()->IterateWeakRoots( |
- &map_updating_visitor); |
- LiveObjectList::IterateElements(&map_updating_visitor); |
- } |
- void UpdateMapPointersInPagedSpace(PagedSpace* space) { |
- ASSERT(space != heap()->map_space()); |
- |
- PageIterator it(space, PageIterator::PAGES_IN_USE); |
- while (it.has_next()) { |
- Page* p = it.next(); |
- UpdateMapPointersInRange(heap(), |
- p->ObjectAreaStart(), |
- p->AllocationTop()); |
+static inline void UpdateSlot(ObjectVisitor* v, |
+ SlotsBuffer::SlotType slot_type, |
+ Address addr) { |
+ switch (slot_type) { |
+ case SlotsBuffer::CODE_TARGET_SLOT: { |
+ RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, NULL, NULL); |
+ rinfo.Visit(v); |
+ break; |
} |
- } |
- |
- void UpdateMapPointersInNewSpace() { |
- NewSpace* space = heap()->new_space(); |
- UpdateMapPointersInRange(heap(), space->bottom(), space->top()); |
- } |
- |
- void UpdateMapPointersInLargeObjectSpace() { |
- LargeObjectIterator it(heap()->lo_space()); |
- for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) |
- UpdateMapPointersInObject(heap(), obj); |
- } |
- |
- void Finish() { |
- heap()->map_space()->FinishCompaction(to_evacuate_start_, live_maps_); |
- } |
- |
- inline Heap* heap() const { return heap_; } |
- |
- private: |
- Heap* heap_; |
- int live_maps_; |
- Address to_evacuate_start_; |
- MapIterator vacant_map_it_; |
- MapIterator map_to_evacuate_it_; |
- Map* first_map_to_evacuate_; |
- |
- // Helper class for updating map pointers in HeapObjects. |
- class MapUpdatingVisitor: public ObjectVisitor { |
- public: |
- MapUpdatingVisitor() {} |
- |
- void VisitPointer(Object** p) { |
- UpdateMapPointer(p); |
+ case SlotsBuffer::CODE_ENTRY_SLOT: { |
+ v->VisitCodeEntry(addr); |
+ break; |
} |
- |
- void VisitPointers(Object** start, Object** end) { |
- for (Object** p = start; p < end; p++) UpdateMapPointer(p); |
+ case SlotsBuffer::RELOCATED_CODE_OBJECT: { |
+ HeapObject* obj = HeapObject::FromAddress(addr); |
+ Code::cast(obj)->CodeIterateBody(v); |
+ break; |
} |
- |
- private: |
- void UpdateMapPointer(Object** p) { |
- if (!(*p)->IsHeapObject()) return; |
- HeapObject* old_map = reinterpret_cast<HeapObject*>(*p); |
- |
- // Moved maps are tagged with overflowed map word. They are the only |
- // objects those map word is overflowed as marking is already complete. |
- MapWord map_word = old_map->map_word(); |
- if (!map_word.IsOverflowed()) return; |
- |
- *p = GetForwardedMap(map_word); |
+ case SlotsBuffer::DEBUG_TARGET_SLOT: { |
+ RelocInfo rinfo(addr, RelocInfo::DEBUG_BREAK_SLOT, NULL, NULL); |
+ if (rinfo.IsPatchedDebugBreakSlotSequence()) rinfo.Visit(v); |
+ break; |
} |
- }; |
- |
- static Map* NextMap(MapIterator* it, HeapObject* last, bool live) { |
- while (true) { |
- HeapObject* next = it->next(); |
- ASSERT(next != NULL); |
- if (next == last) |
- return NULL; |
- ASSERT(!next->IsOverflowed()); |
- ASSERT(!next->IsMarked()); |
- ASSERT(next->IsMap() || FreeListNode::IsFreeListNode(next)); |
- if (next->IsMap() == live) |
- return reinterpret_cast<Map*>(next); |
+ case SlotsBuffer::JS_RETURN_SLOT: { |
+ RelocInfo rinfo(addr, RelocInfo::JS_RETURN, NULL, NULL); |
+ if (rinfo.IsPatchedReturnSequence()) rinfo.Visit(v); |
+ break; |
} |
+ default: |
+ UNREACHABLE(); |
+ break; |
} |
+} |
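 |
The slots buffer filled during migration holds two kinds of entries, |
matching the two AddTo() overloads used above: untyped entries (the |
address of an ordinary pointer slot) and typed entries (a SlotType plus an |
address) that this switch decodes. A minimal model of such a buffer; the |
in-band encoding of the real SlotsBuffer is abstracted into an explicit |
record here. |
 |
#include <vector> |
 |
enum ModelSlotType { UNTYPED_SLOT, CODE_ENTRY_SLOT, RELOCATED_CODE_OBJECT }; |
 |
struct ModelSlot { |
  ModelSlotType type; |
  void* addr;  // Slot address (untyped) or payload address (typed). |
}; |
 |
struct ModelSlotsBuffer { |
  std::vector<ModelSlot> slots; |
 |
  void Add(void** slot) { slots.push_back({UNTYPED_SLOT, slot}); } |
  void Add(ModelSlotType type, void* addr) { slots.push_back({type, addr}); } |
}; |
 |
// Replay mirrors the dispatch above: untyped entries are plain pointer |
// slots; typed entries need type-specific decoding. |
void Replay(ModelSlotsBuffer* buffer, |
            void (*UpdateUntyped)(void**), |
            void (*UpdateTyped)(ModelSlotType, void*)) { |
  for (const ModelSlot& s : buffer->slots) { |
    if (s.type == UNTYPED_SLOT) { |
      UpdateUntyped(reinterpret_cast<void**>(s.addr)); |
    } else { |
      UpdateTyped(s.type, s.addr); |
    } |
  } |
} |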
- Map* NextVacantMap() { |
- Map* map = NextMap(&vacant_map_it_, first_map_to_evacuate_, false); |
- ASSERT(map == NULL || FreeListNode::IsFreeListNode(map)); |
- return map; |
- } |
- Map* NextMapToEvacuate() { |
- Map* map = NextMap(&map_to_evacuate_it_, NULL, true); |
- ASSERT(map != NULL); |
- ASSERT(map->IsMap()); |
- return map; |
- } |
- |
- static void EvacuateMap(Map* vacant_map, Map* map_to_evacuate) { |
- ASSERT(FreeListNode::IsFreeListNode(vacant_map)); |
- ASSERT(map_to_evacuate->IsMap()); |
- |
- ASSERT(Map::kSize % 4 == 0); |
- |
- map_to_evacuate->heap()->CopyBlockToOldSpaceAndUpdateRegionMarks( |
- vacant_map->address(), map_to_evacuate->address(), Map::kSize); |
- |
- ASSERT(vacant_map->IsMap()); // Due to memcpy above. |
- |
- MapWord forwarding_map_word = MapWord::FromMap(vacant_map); |
- forwarding_map_word.SetOverflow(); |
- map_to_evacuate->set_map_word(forwarding_map_word); |
- |
- ASSERT(map_to_evacuate->map_word().IsOverflowed()); |
- ASSERT(GetForwardedMap(map_to_evacuate->map_word()) == vacant_map); |
- } |
- |
- static Map* GetForwardedMap(MapWord map_word) { |
- ASSERT(map_word.IsOverflowed()); |
- map_word.ClearOverflow(); |
- Map* new_map = map_word.ToMap(); |
- ASSERT_MAP_ALIGNED(new_map->address()); |
- return new_map; |
- } |
- |
- static int UpdateMapPointersInObject(Heap* heap, HeapObject* obj) { |
- ASSERT(!obj->IsMarked()); |
- Map* map = obj->map(); |
- ASSERT(heap->map_space()->Contains(map)); |
- MapWord map_word = map->map_word(); |
- ASSERT(!map_word.IsMarked()); |
- if (map_word.IsOverflowed()) { |
- Map* new_map = GetForwardedMap(map_word); |
- ASSERT(heap->map_space()->Contains(new_map)); |
- obj->set_map(new_map); |
- |
-#ifdef DEBUG |
- if (FLAG_gc_verbose) { |
- PrintF("update %p : %p -> %p\n", |
- obj->address(), |
- reinterpret_cast<void*>(map), |
- reinterpret_cast<void*>(new_map)); |
+static inline void UpdateSlotsInRange(Object** start, Object** end) { |
+ for (Object** slot = start; |
+ slot < end; |
+ slot++) { |
+ Object* obj = *slot; |
+ if (obj->IsHeapObject() && |
+ MarkCompactCollector::IsOnEvacuationCandidate(obj)) { |
+ MapWord map_word = HeapObject::cast(obj)->map_word(); |
+ if (map_word.IsForwardingAddress()) { |
+ *slot = map_word.ToForwardingAddress(); |
+ ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(*slot)); |
} |
-#endif |
} |
- |
- int size = obj->SizeFromMap(map); |
- MapUpdatingVisitor map_updating_visitor; |
- obj->IterateBody(map->instance_type(), size, &map_updating_visitor); |
- return size; |
} |
+} |
- static void UpdateMapPointersInRange(Heap* heap, Address start, Address end) { |
- HeapObject* object; |
- int size; |
- for (Address current = start; current < end; current += size) { |
- object = HeapObject::FromAddress(current); |
- size = UpdateMapPointersInObject(heap, object); |
- ASSERT(size > 0); |
- } |
- } |
-#ifdef DEBUG |
- void CheckNoMapsToEvacuate() { |
- if (!FLAG_enable_slow_asserts) |
- return; |
- |
- for (HeapObject* obj = map_to_evacuate_it_.next(); |
- obj != NULL; obj = map_to_evacuate_it_.next()) |
- ASSERT(FreeListNode::IsFreeListNode(obj)); |
- } |
-#endif |
+enum SweepingMode { |
+ SWEEP_ONLY, |
+ SWEEP_AND_VISIT_LIVE_OBJECTS |
}; |
-void MarkCompactCollector::SweepSpaces() { |
- GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP); |
+enum SkipListRebuildingMode { |
+ REBUILD_SKIP_LIST, |
+ IGNORE_SKIP_LIST |
+}; |
- ASSERT(state_ == SWEEP_SPACES); |
- ASSERT(!IsCompacting()); |
- // Noncompacting collections simply sweep the spaces to clear the mark |
- // bits and free the nonlive blocks (for old and map spaces). We sweep |
- // the map space last because freeing non-live maps overwrites them and |
- // the other spaces rely on possibly non-live maps to get the sizes for |
- // non-live objects. |
- SweepSpace(heap(), heap()->old_pointer_space()); |
- SweepSpace(heap(), heap()->old_data_space()); |
- SweepSpace(heap(), heap()->code_space()); |
- SweepSpace(heap(), heap()->cell_space()); |
- { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE); |
- SweepNewSpace(heap(), heap()->new_space()); |
- } |
- SweepSpace(heap(), heap()->map_space()); |
- heap()->IterateDirtyRegions(heap()->map_space(), |
- &heap()->IteratePointersInDirtyMapsRegion, |
- &UpdatePointerToNewGen, |
- heap()->WATERMARK_SHOULD_BE_VALID); |
+// Sweep a space precisely. After this has been done the space can |
+// be iterated precisely, hitting only the live objects. Code space |
+// is always swept precisely because we want to be able to iterate |
+// over it. Map space is swept precisely, because it is not compacted. |
+// Slots in live objects pointing into evacuation candidates are updated |
+// if requested. |
+template<SkipListRebuildingMode skip_list_mode> |
+static void SweepPrecisely(PagedSpace* space, |
+ Page* p, |
+ SweepingMode mode, |
+ ObjectVisitor* v) { |
+ ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept()); |
+ ASSERT_EQ(skip_list_mode == REBUILD_SKIP_LIST, |
+ space->identity() == CODE_SPACE); |
+ ASSERT((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST)); |
- intptr_t live_maps_size = heap()->map_space()->Size(); |
- int live_maps = static_cast<int>(live_maps_size / Map::kSize); |
- ASSERT(live_map_objects_size_ == live_maps_size); |
+ MarkBit::CellType* cells = p->markbits()->cells(); |
+ p->MarkSweptPrecisely(); |
- if (heap()->map_space()->NeedsCompaction(live_maps)) { |
- MapCompact map_compact(heap(), live_maps); |
+ int last_cell_index = |
+ Bitmap::IndexToCell( |
+ Bitmap::CellAlignIndex( |
+ p->AddressToMarkbitIndex(p->ObjectAreaEnd()))); |
- map_compact.CompactMaps(); |
- map_compact.UpdateMapPointersInRoots(); |
+ int cell_index = Page::kFirstUsedCell; |
+ Address free_start = p->ObjectAreaStart(); |
+ ASSERT(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0); |
+ Address object_address = p->ObjectAreaStart(); |
+ int offsets[16]; |
- PagedSpaces spaces; |
- for (PagedSpace* space = spaces.next(); |
- space != NULL; space = spaces.next()) { |
- if (space == heap()->map_space()) continue; |
- map_compact.UpdateMapPointersInPagedSpace(space); |
- } |
- map_compact.UpdateMapPointersInNewSpace(); |
- map_compact.UpdateMapPointersInLargeObjectSpace(); |
- |
- map_compact.Finish(); |
+ SkipList* skip_list = p->skip_list(); |
+ int curr_region = -1; |
+ if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list) { |
+ skip_list->Clear(); |
} |
-} |
- |
-// Iterate the live objects in a range of addresses (eg, a page or a |
-// semispace). The live regions of the range have been linked into a list. |
-// The first live region is [first_live_start, first_live_end), and the last |
-// address in the range is top. The callback function is used to get the |
-// size of each live object. |
-int MarkCompactCollector::IterateLiveObjectsInRange( |
- Address start, |
- Address end, |
- LiveObjectCallback size_func) { |
- int live_objects_size = 0; |
- Address current = start; |
- while (current < end) { |
- uint32_t encoded_map = Memory::uint32_at(current); |
- if (encoded_map == kSingleFreeEncoding) { |
- current += kPointerSize; |
- } else if (encoded_map == kMultiFreeEncoding) { |
- current += Memory::int_at(current + kIntSize); |
- } else { |
- int size = (this->*size_func)(HeapObject::FromAddress(current)); |
- current += size; |
- live_objects_size += size; |
+ for (cell_index = Page::kFirstUsedCell; |
+ cell_index < last_cell_index; |
+ cell_index++, object_address += 32 * kPointerSize) { |
+ ASSERT(static_cast<unsigned>(cell_index) ==
+ Bitmap::IndexToCell(
+ Bitmap::CellAlignIndex(
+ p->AddressToMarkbitIndex(object_address))));
+ int live_objects = MarkWordToObjectStarts(cells[cell_index], offsets); |
+ int live_index = 0; |
+ for ( ; live_objects != 0; live_objects--) { |
+ Address free_end = object_address + offsets[live_index++] * kPointerSize; |
+ if (free_end != free_start) { |
+ space->Free(free_start, static_cast<int>(free_end - free_start)); |
+ } |
+ HeapObject* live_object = HeapObject::FromAddress(free_end); |
+ ASSERT(Marking::IsBlack(Marking::MarkBitFrom(live_object))); |
+ Map* map = live_object->map(); |
+ int size = live_object->SizeFromMap(map); |
+ if (mode == SWEEP_AND_VISIT_LIVE_OBJECTS) { |
+ live_object->IterateBody(map->instance_type(), size, v); |
+ } |
+ if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) { |
+ int new_region_start = |
+ SkipList::RegionNumber(free_end); |
+ int new_region_end = |
+ SkipList::RegionNumber(free_end + size - kPointerSize); |
+ if (new_region_start != curr_region || |
+ new_region_end != curr_region) { |
+ skip_list->AddObject(free_end, size); |
+ curr_region = new_region_end; |
+ } |
+ } |
+ free_start = free_end + size; |
} |
+ // Clear marking bits for current cell. |
+ cells[cell_index] = 0; |
} |
- return live_objects_size; |
-} |
- |
- |
-int MarkCompactCollector::IterateLiveObjects( |
- NewSpace* space, LiveObjectCallback size_f) { |
- ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS); |
- return IterateLiveObjectsInRange(space->bottom(), space->top(), size_f); |
-} |
- |
- |
-int MarkCompactCollector::IterateLiveObjects( |
- PagedSpace* space, LiveObjectCallback size_f) { |
- ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS); |
- int total = 0; |
- PageIterator it(space, PageIterator::PAGES_IN_USE); |
- while (it.has_next()) { |
- Page* p = it.next(); |
- total += IterateLiveObjectsInRange(p->ObjectAreaStart(), |
- p->AllocationTop(), |
- size_f); |
+ if (free_start != p->ObjectAreaEnd()) { |
+ space->Free(free_start, static_cast<int>(p->ObjectAreaEnd() - free_start)); |
} |
- return total; |
} |
-// ------------------------------------------------------------------------- |
-// Phase 3: Update pointers |
+void MarkCompactCollector::EvacuateNewSpaceAndCandidates() { |
+ EvacuateNewSpace(); |
+ EvacuatePages(); |
-// Helper class for updating pointers in HeapObjects. |
-class UpdatingVisitor: public ObjectVisitor { |
- public: |
- explicit UpdatingVisitor(Heap* heap) : heap_(heap) {} |
+ // Second pass: find pointers to new space and update them. |
+ PointersUpdatingVisitor updating_visitor(heap()); |
- void VisitPointer(Object** p) { |
- UpdatePointer(p); |
+ // Update pointers in to-space.
+ SemiSpaceIterator to_it(heap()->new_space()->bottom(), |
+ heap()->new_space()->top()); |
+ for (HeapObject* object = to_it.Next(); |
+ object != NULL; |
+ object = to_it.Next()) { |
+ Map* map = object->map(); |
+ object->IterateBody(map->instance_type(), |
+ object->SizeFromMap(map), |
+ &updating_visitor); |
} |
- void VisitPointers(Object** start, Object** end) { |
- // Mark all HeapObject pointers in [start, end) |
- for (Object** p = start; p < end; p++) UpdatePointer(p); |
- } |
+ // Update roots. |
+ heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE); |
+ LiveObjectList::IterateElements(&updating_visitor); |
- void VisitCodeTarget(RelocInfo* rinfo) { |
- ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode())); |
- Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address()); |
- VisitPointer(&target); |
- rinfo->set_target_address( |
- reinterpret_cast<Code*>(target)->instruction_start()); |
+ { |
+ StoreBufferRebuildScope scope(heap_, |
+ heap_->store_buffer(), |
+ &Heap::ScavengeStoreBufferCallback); |
+ heap_->store_buffer()->IteratePointersToNewSpace(&UpdatePointer); |
} |
- void VisitDebugTarget(RelocInfo* rinfo) { |
- ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) && |
- rinfo->IsPatchedReturnSequence()) || |
- (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) && |
- rinfo->IsPatchedDebugBreakSlotSequence())); |
- Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address()); |
- VisitPointer(&target); |
- rinfo->set_call_address( |
- reinterpret_cast<Code*>(target)->instruction_start()); |
+ SlotsBuffer::UpdateSlotsRecordedIn(heap_, migration_slots_buffer_); |
+ if (FLAG_trace_fragmentation) { |
+ PrintF(" migration slots buffer: %d\n", |
+ SlotsBuffer::SizeOfChain(migration_slots_buffer_)); |
} |
- inline Heap* heap() const { return heap_; } |
+ int npages = evacuation_candidates_.length(); |
+ for (int i = 0; i < npages; i++) { |
+ Page* p = evacuation_candidates_[i]; |
+ ASSERT(p->IsEvacuationCandidate() || |
+ p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); |
- private: |
- void UpdatePointer(Object** p) { |
- if (!(*p)->IsHeapObject()) return; |
- |
- HeapObject* obj = HeapObject::cast(*p); |
- Address old_addr = obj->address(); |
- Address new_addr; |
- ASSERT(!heap()->InFromSpace(obj)); |
- |
- if (heap()->new_space()->Contains(obj)) { |
- Address forwarding_pointer_addr = |
- heap()->new_space()->FromSpaceLow() + |
- heap()->new_space()->ToSpaceOffsetForAddress(old_addr); |
- new_addr = Memory::Address_at(forwarding_pointer_addr); |
- |
-#ifdef DEBUG |
- ASSERT(heap()->old_pointer_space()->Contains(new_addr) || |
- heap()->old_data_space()->Contains(new_addr) || |
- heap()->new_space()->FromSpaceContains(new_addr) || |
- heap()->lo_space()->Contains(HeapObject::FromAddress(new_addr))); |
- |
- if (heap()->new_space()->FromSpaceContains(new_addr)) { |
- ASSERT(heap()->new_space()->FromSpaceOffsetForAddress(new_addr) <= |
- heap()->new_space()->ToSpaceOffsetForAddress(old_addr)); |
+ if (p->IsEvacuationCandidate()) { |
+ SlotsBuffer::UpdateSlotsRecordedIn(heap_, p->slots_buffer()); |
+ if (FLAG_trace_fragmentation) { |
+ PrintF(" page %p slots buffer: %d\n", |
+ reinterpret_cast<void*>(p), |
+ SlotsBuffer::SizeOfChain(p->slots_buffer())); |
} |
-#endif |
- } else if (heap()->lo_space()->Contains(obj)) { |
- // Don't move objects in the large object space. |
- return; |
- |
+ // Important: the skip list must be cleared only after the roots have been
+ // updated, because root iteration traverses the stack and might have to
+ // find code objects starting from a not-yet-updated pc that points into
+ // an evacuation candidate.
+ SkipList* list = p->skip_list(); |
+ if (list != NULL) list->Clear(); |
} else { |
-#ifdef DEBUG |
- PagedSpaces spaces; |
- PagedSpace* original_space = spaces.next(); |
- while (original_space != NULL) { |
- if (original_space->Contains(obj)) break; |
- original_space = spaces.next(); |
+ if (FLAG_gc_verbose) { |
+ PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n", |
+ reinterpret_cast<intptr_t>(p)); |
} |
- ASSERT(original_space != NULL); |
-#endif |
- new_addr = MarkCompactCollector::GetForwardingAddressInOldSpace(obj); |
- ASSERT(original_space->Contains(new_addr)); |
- ASSERT(original_space->MCSpaceOffsetForAddress(new_addr) <= |
- original_space->MCSpaceOffsetForAddress(old_addr)); |
+ PagedSpace* space = static_cast<PagedSpace*>(p->owner()); |
+ p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION); |
+ |
+ switch (space->identity()) { |
+ case OLD_DATA_SPACE: |
+ SweepConservatively(space, p); |
+ break; |
+ case OLD_POINTER_SPACE: |
+ SweepPrecisely<IGNORE_SKIP_LIST>(space, |
+ p, |
+ SWEEP_AND_VISIT_LIVE_OBJECTS, |
+ &updating_visitor); |
+ break; |
+ case CODE_SPACE: |
+ SweepPrecisely<REBUILD_SKIP_LIST>(space, |
+ p, |
+ SWEEP_AND_VISIT_LIVE_OBJECTS, |
+ &updating_visitor); |
+ break; |
+ default: |
+ UNREACHABLE(); |
+ break; |
+ } |
} |
+ } |
- *p = HeapObject::FromAddress(new_addr); |
- |
-#ifdef DEBUG |
- if (FLAG_gc_verbose) { |
- PrintF("update %p : %p -> %p\n", |
- reinterpret_cast<Address>(p), old_addr, new_addr); |
+ // Update pointers from cells. |
+ HeapObjectIterator cell_iterator(heap_->cell_space()); |
+ for (HeapObject* cell = cell_iterator.Next(); |
+ cell != NULL; |
+ cell = cell_iterator.Next()) { |
+ if (cell->IsJSGlobalPropertyCell()) { |
+ Address value_address = |
+ reinterpret_cast<Address>(cell) + |
+ (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag); |
+ updating_visitor.VisitPointer(reinterpret_cast<Object**>(value_address)); |
} |
-#endif |
} |
- Heap* heap_; |
-}; |
+ // Update the pointer to the global contexts list.
+ updating_visitor.VisitPointer(heap_->global_contexts_list_address()); |
+ heap_->symbol_table()->Iterate(&updating_visitor); |
-void MarkCompactCollector::UpdatePointers() { |
-#ifdef DEBUG |
- ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES); |
- state_ = UPDATE_POINTERS; |
-#endif |
- UpdatingVisitor updating_visitor(heap()); |
+ // Update pointers from external string table. |
+ heap_->UpdateReferencesInExternalStringTable( |
+ &UpdateReferenceInExternalStringTableEntry); |
+ |
+ // Update JSFunction pointers from the runtime profiler. |
heap()->isolate()->runtime_profiler()->UpdateSamplesAfterCompact( |
&updating_visitor); |
- heap()->IterateRoots(&updating_visitor, VISIT_ONLY_STRONG); |
- heap()->isolate()->global_handles()->IterateWeakRoots(&updating_visitor); |
- // Update the pointer to the head of the weak list of global contexts. |
- updating_visitor.VisitPointer(&heap()->global_contexts_list_); |
+ EvacuationWeakObjectRetainer evacuation_object_retainer; |
+ heap()->ProcessWeakReferences(&evacuation_object_retainer); |
- LiveObjectList::IterateElements(&updating_visitor); |
- |
- int live_maps_size = IterateLiveObjects( |
- heap()->map_space(), &MarkCompactCollector::UpdatePointersInOldObject); |
- int live_pointer_olds_size = IterateLiveObjects( |
- heap()->old_pointer_space(), |
- &MarkCompactCollector::UpdatePointersInOldObject); |
- int live_data_olds_size = IterateLiveObjects( |
- heap()->old_data_space(), |
- &MarkCompactCollector::UpdatePointersInOldObject); |
- int live_codes_size = IterateLiveObjects( |
- heap()->code_space(), &MarkCompactCollector::UpdatePointersInOldObject); |
- int live_cells_size = IterateLiveObjects( |
- heap()->cell_space(), &MarkCompactCollector::UpdatePointersInOldObject); |
- int live_news_size = IterateLiveObjects( |
- heap()->new_space(), &MarkCompactCollector::UpdatePointersInNewObject); |
- |
- // Large objects do not move, the map word can be updated directly. |
- LargeObjectIterator it(heap()->lo_space()); |
- for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) { |
- UpdatePointersInNewObject(obj); |
- } |
- |
- USE(live_maps_size); |
- USE(live_pointer_olds_size); |
- USE(live_data_olds_size); |
- USE(live_codes_size); |
- USE(live_cells_size); |
- USE(live_news_size); |
- ASSERT(live_maps_size == live_map_objects_size_); |
- ASSERT(live_data_olds_size == live_old_data_objects_size_); |
- ASSERT(live_pointer_olds_size == live_old_pointer_objects_size_); |
- ASSERT(live_codes_size == live_code_objects_size_); |
- ASSERT(live_cells_size == live_cell_objects_size_); |
- ASSERT(live_news_size == live_young_objects_size_); |
-} |
- |
- |
-int MarkCompactCollector::UpdatePointersInNewObject(HeapObject* obj) { |
- // Keep old map pointers |
- Map* old_map = obj->map(); |
- ASSERT(old_map->IsHeapObject()); |
- |
- Address forwarded = GetForwardingAddressInOldSpace(old_map); |
- |
- ASSERT(heap()->map_space()->Contains(old_map)); |
- ASSERT(heap()->map_space()->Contains(forwarded)); |
#ifdef DEBUG |
- if (FLAG_gc_verbose) { |
- PrintF("update %p : %p -> %p\n", obj->address(), old_map->address(), |
- forwarded); |
+ if (FLAG_verify_heap) { |
+ VerifyEvacuation(heap_); |
} |
#endif |
- // Update the map pointer. |
- obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(forwarded))); |
- // We have to compute the object size relying on the old map because |
- // map objects are not relocated yet. |
- int obj_size = obj->SizeFromMap(old_map); |
- |
- // Update pointers in the object body. |
- UpdatingVisitor updating_visitor(heap()); |
- obj->IterateBody(old_map->instance_type(), obj_size, &updating_visitor); |
- return obj_size; |
+ slots_buffer_allocator_.DeallocateChain(&migration_slots_buffer_); |
+ ASSERT(migration_slots_buffer_ == NULL); |
+ for (int i = 0; i < npages; i++) { |
+ Page* p = evacuation_candidates_[i]; |
+ if (!p->IsEvacuationCandidate()) continue; |
+ PagedSpace* space = static_cast<PagedSpace*>(p->owner()); |
+ space->Free(p->ObjectAreaStart(), Page::kObjectAreaSize); |
+ p->set_scan_on_scavenge(false); |
+ slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address()); |
+ p->ClearEvacuationCandidate(); |
+ } |
+ evacuation_candidates_.Rewind(0); |
+ compacting_ = false; |
} |
-int MarkCompactCollector::UpdatePointersInOldObject(HeapObject* obj) { |
- // Decode the map pointer. |
- MapWord encoding = obj->map_word(); |
- Address map_addr = encoding.DecodeMapAddress(heap()->map_space()); |
- ASSERT(heap()->map_space()->Contains(HeapObject::FromAddress(map_addr))); |
+INLINE(static uint32_t SweepFree(PagedSpace* space, |
+ Page* p, |
+ uint32_t free_start, |
+ uint32_t region_end, |
+ uint32_t* cells)); |
- // At this point, the first word of map_addr is also encoded, cannot |
- // cast it to Map* using Map::cast. |
- Map* map = reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr)); |
- int obj_size = obj->SizeFromMap(map); |
- InstanceType type = map->instance_type(); |
- // Update map pointer. |
- Address new_map_addr = GetForwardingAddressInOldSpace(map); |
- int offset = encoding.DecodeOffset(); |
- obj->set_map_word(MapWord::EncodeAddress(new_map_addr, offset)); |
- |
-#ifdef DEBUG |
- if (FLAG_gc_verbose) { |
- PrintF("update %p : %p -> %p\n", obj->address(), |
- map_addr, new_map_addr); |
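+// Sweeps the run of completely free (all-zero) mark-bit cells that starts
+// at the mark-bit index free_start, releases the corresponding memory, and
+// returns the index of the first cell that contains live objects again (or
+// region_end if the free run extends to the end of the region).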
+static uint32_t SweepFree(PagedSpace* space, |
+ Page* p, |
+ uint32_t free_start, |
+ uint32_t region_end, |
+ uint32_t* cells) { |
+ uint32_t free_cell_index = Bitmap::IndexToCell(free_start); |
+ ASSERT(cells[free_cell_index] == 0); |
+ while (free_cell_index < region_end && cells[free_cell_index] == 0) { |
+ free_cell_index++; |
} |
-#endif |
- // Update pointers in the object body. |
- UpdatingVisitor updating_visitor(heap()); |
- obj->IterateBody(type, obj_size, &updating_visitor); |
- return obj_size; |
-} |
- |
- |
-Address MarkCompactCollector::GetForwardingAddressInOldSpace(HeapObject* obj) { |
- // Object should either in old or map space. |
- MapWord encoding = obj->map_word(); |
- |
- // Offset to the first live object's forwarding address. |
- int offset = encoding.DecodeOffset(); |
- Address obj_addr = obj->address(); |
- |
- // Find the first live object's forwarding address. |
- Page* p = Page::FromAddress(obj_addr); |
- Address first_forwarded = p->mc_first_forwarded; |
- |
- // Page start address of forwarded address. |
- Page* forwarded_page = Page::FromAddress(first_forwarded); |
- int forwarded_offset = forwarded_page->Offset(first_forwarded); |
- |
- // Find end of allocation in the page of first_forwarded. |
- int mc_top_offset = forwarded_page->AllocationWatermarkOffset(); |
- |
- // Check if current object's forward pointer is in the same page |
- // as the first live object's forwarding pointer |
- if (forwarded_offset + offset < mc_top_offset) { |
- // In the same page. |
- return first_forwarded + offset; |
+ if (free_cell_index >= region_end) { |
+ return free_cell_index; |
} |
- // Must be in the next page, NOTE: this may cross chunks. |
- Page* next_page = forwarded_page->next_page(); |
- ASSERT(next_page->is_valid()); |
+ uint32_t free_end = Bitmap::CellToIndex(free_cell_index); |
+ space->FreeOrUnmapPage(p, |
+ p->MarkbitIndexToAddress(free_start), |
+ (free_end - free_start) << kPointerSizeLog2); |
- offset -= (mc_top_offset - forwarded_offset); |
- offset += Page::kObjectStartOffset; |
- |
- ASSERT_PAGE_OFFSET(offset); |
- ASSERT(next_page->OffsetToAddress(offset) < next_page->AllocationTop()); |
- |
- return next_page->OffsetToAddress(offset); |
+ return free_cell_index; |
} |
-// ------------------------------------------------------------------------- |
-// Phase 4: Relocate objects |
+INLINE(static uint32_t NextCandidate(uint32_t cell_index, |
+ uint32_t last_cell_index, |
+ uint32_t* cells)); |
-void MarkCompactCollector::RelocateObjects() { |
-#ifdef DEBUG |
- ASSERT(state_ == UPDATE_POINTERS); |
- state_ = RELOCATE_OBJECTS; |
-#endif |
- // Relocates objects, always relocate map objects first. Relocating |
- // objects in other space relies on map objects to get object size. |
- int live_maps_size = IterateLiveObjects( |
- heap()->map_space(), &MarkCompactCollector::RelocateMapObject); |
- int live_pointer_olds_size = IterateLiveObjects( |
- heap()->old_pointer_space(), |
- &MarkCompactCollector::RelocateOldPointerObject); |
- int live_data_olds_size = IterateLiveObjects( |
- heap()->old_data_space(), &MarkCompactCollector::RelocateOldDataObject); |
- int live_codes_size = IterateLiveObjects( |
- heap()->code_space(), &MarkCompactCollector::RelocateCodeObject); |
- int live_cells_size = IterateLiveObjects( |
- heap()->cell_space(), &MarkCompactCollector::RelocateCellObject); |
- int live_news_size = IterateLiveObjects( |
- heap()->new_space(), &MarkCompactCollector::RelocateNewObject); |
- USE(live_maps_size); |
- USE(live_pointer_olds_size); |
- USE(live_data_olds_size); |
- USE(live_codes_size); |
- USE(live_cells_size); |
- USE(live_news_size); |
- ASSERT(live_maps_size == live_map_objects_size_); |
- ASSERT(live_data_olds_size == live_old_data_objects_size_); |
- ASSERT(live_pointer_olds_size == live_old_pointer_objects_size_); |
- ASSERT(live_codes_size == live_code_objects_size_); |
- ASSERT(live_cells_size == live_cell_objects_size_); |
- ASSERT(live_news_size == live_young_objects_size_); |
- |
- // Flip from and to spaces |
- heap()->new_space()->Flip(); |
- |
- heap()->new_space()->MCCommitRelocationInfo(); |
- |
- // Set age_mark to bottom in to space |
- Address mark = heap()->new_space()->bottom(); |
- heap()->new_space()->set_age_mark(mark); |
- |
- PagedSpaces spaces; |
- for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next()) |
- space->MCCommitRelocationInfo(); |
- |
- heap()->CheckNewSpaceExpansionCriteria(); |
- heap()->IncrementYoungSurvivorsCounter(live_news_size); |
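+// Advances cell_index past cells that contain live objects and returns the
+// index of the next completely free mark-bit cell, the next candidate for
+// the start of a free block.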
+static uint32_t NextCandidate(uint32_t cell_index, |
+ uint32_t last_cell_index, |
+ uint32_t* cells) { |
+ do { |
+ cell_index++; |
+ } while (cell_index < last_cell_index && cells[cell_index] != 0); |
+ return cell_index; |
} |
-int MarkCompactCollector::RelocateMapObject(HeapObject* obj) { |
- // Recover map pointer. |
- MapWord encoding = obj->map_word(); |
- Address map_addr = encoding.DecodeMapAddress(heap()->map_space()); |
- ASSERT(heap()->map_space()->Contains(HeapObject::FromAddress(map_addr))); |
+static const int kStartTableEntriesPerLine = 5; |
+static const int kStartTableLines = 171; |
+static const int kStartTableInvalidLine = 127; |
+static const int kStartTableUnusedEntry = 126; |
- // Get forwarding address before resetting map pointer |
- Address new_addr = GetForwardingAddressInOldSpace(obj); |
+#define _ kStartTableUnusedEntry |
+#define X kStartTableInvalidLine |
+// Mark-bit to object start offset table. |
+// |
+// The line is indexed by the mark bits in a byte. The first number on |
+// the line describes the number of live object starts for the line and the |
+// other numbers on the line describe the offsets (in words) of the object |
+// starts. |
+// |
+// Since objects are at least two words in size we don't need entries for
+// bytes with two consecutive 1 bits. All byte values above 170 contain at
+// least two consecutive 1 bits, so the table only needs lines 0 through 170.
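+//
+// For example, the byte 21 (0b00010101) has mark bits set at offsets 0, 2
+// and 4, so its line below reads: 3, 0, 2, 4.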
+char kStartTable[kStartTableLines * kStartTableEntriesPerLine] = { |
+ 0, _, _, _, _, // 0 |
+ 1, 0, _, _, _, // 1 |
+ 1, 1, _, _, _, // 2 |
+ X, _, _, _, _, // 3 |
+ 1, 2, _, _, _, // 4 |
+ 2, 0, 2, _, _, // 5 |
+ X, _, _, _, _, // 6 |
+ X, _, _, _, _, // 7 |
+ 1, 3, _, _, _, // 8 |
+ 2, 0, 3, _, _, // 9 |
+ 2, 1, 3, _, _, // 10 |
+ X, _, _, _, _, // 11 |
+ X, _, _, _, _, // 12 |
+ X, _, _, _, _, // 13 |
+ X, _, _, _, _, // 14 |
+ X, _, _, _, _, // 15 |
+ 1, 4, _, _, _, // 16 |
+ 2, 0, 4, _, _, // 17 |
+ 2, 1, 4, _, _, // 18 |
+ X, _, _, _, _, // 19 |
+ 2, 2, 4, _, _, // 20 |
+ 3, 0, 2, 4, _, // 21 |
+ X, _, _, _, _, // 22 |
+ X, _, _, _, _, // 23 |
+ X, _, _, _, _, // 24 |
+ X, _, _, _, _, // 25 |
+ X, _, _, _, _, // 26 |
+ X, _, _, _, _, // 27 |
+ X, _, _, _, _, // 28 |
+ X, _, _, _, _, // 29 |
+ X, _, _, _, _, // 30 |
+ X, _, _, _, _, // 31 |
+ 1, 5, _, _, _, // 32 |
+ 2, 0, 5, _, _, // 33 |
+ 2, 1, 5, _, _, // 34 |
+ X, _, _, _, _, // 35 |
+ 2, 2, 5, _, _, // 36 |
+ 3, 0, 2, 5, _, // 37 |
+ X, _, _, _, _, // 38 |
+ X, _, _, _, _, // 39 |
+ 2, 3, 5, _, _, // 40 |
+ 3, 0, 3, 5, _, // 41 |
+ 3, 1, 3, 5, _, // 42 |
+ X, _, _, _, _, // 43 |
+ X, _, _, _, _, // 44 |
+ X, _, _, _, _, // 45 |
+ X, _, _, _, _, // 46 |
+ X, _, _, _, _, // 47 |
+ X, _, _, _, _, // 48 |
+ X, _, _, _, _, // 49 |
+ X, _, _, _, _, // 50 |
+ X, _, _, _, _, // 51 |
+ X, _, _, _, _, // 52 |
+ X, _, _, _, _, // 53 |
+ X, _, _, _, _, // 54 |
+ X, _, _, _, _, // 55 |
+ X, _, _, _, _, // 56 |
+ X, _, _, _, _, // 57 |
+ X, _, _, _, _, // 58 |
+ X, _, _, _, _, // 59 |
+ X, _, _, _, _, // 60 |
+ X, _, _, _, _, // 61 |
+ X, _, _, _, _, // 62 |
+ X, _, _, _, _, // 63 |
+ 1, 6, _, _, _, // 64 |
+ 2, 0, 6, _, _, // 65 |
+ 2, 1, 6, _, _, // 66 |
+ X, _, _, _, _, // 67 |
+ 2, 2, 6, _, _, // 68 |
+ 3, 0, 2, 6, _, // 69 |
+ X, _, _, _, _, // 70 |
+ X, _, _, _, _, // 71 |
+ 2, 3, 6, _, _, // 72 |
+ 3, 0, 3, 6, _, // 73 |
+ 3, 1, 3, 6, _, // 74 |
+ X, _, _, _, _, // 75 |
+ X, _, _, _, _, // 76 |
+ X, _, _, _, _, // 77 |
+ X, _, _, _, _, // 78 |
+ X, _, _, _, _, // 79 |
+ 2, 4, 6, _, _, // 80 |
+ 3, 0, 4, 6, _, // 81 |
+ 3, 1, 4, 6, _, // 82 |
+ X, _, _, _, _, // 83 |
+ 3, 2, 4, 6, _, // 84 |
+ 4, 0, 2, 4, 6, // 85 |
+ X, _, _, _, _, // 86 |
+ X, _, _, _, _, // 87 |
+ X, _, _, _, _, // 88 |
+ X, _, _, _, _, // 89 |
+ X, _, _, _, _, // 90 |
+ X, _, _, _, _, // 91 |
+ X, _, _, _, _, // 92 |
+ X, _, _, _, _, // 93 |
+ X, _, _, _, _, // 94 |
+ X, _, _, _, _, // 95 |
+ X, _, _, _, _, // 96 |
+ X, _, _, _, _, // 97 |
+ X, _, _, _, _, // 98 |
+ X, _, _, _, _, // 99 |
+ X, _, _, _, _, // 100 |
+ X, _, _, _, _, // 101 |
+ X, _, _, _, _, // 102 |
+ X, _, _, _, _, // 103 |
+ X, _, _, _, _, // 104 |
+ X, _, _, _, _, // 105 |
+ X, _, _, _, _, // 106 |
+ X, _, _, _, _, // 107 |
+ X, _, _, _, _, // 108 |
+ X, _, _, _, _, // 109 |
+ X, _, _, _, _, // 110 |
+ X, _, _, _, _, // 111 |
+ X, _, _, _, _, // 112 |
+ X, _, _, _, _, // 113 |
+ X, _, _, _, _, // 114 |
+ X, _, _, _, _, // 115 |
+ X, _, _, _, _, // 116 |
+ X, _, _, _, _, // 117 |
+ X, _, _, _, _, // 118 |
+ X, _, _, _, _, // 119 |
+ X, _, _, _, _, // 120 |
+ X, _, _, _, _, // 121 |
+ X, _, _, _, _, // 122 |
+ X, _, _, _, _, // 123 |
+ X, _, _, _, _, // 124 |
+ X, _, _, _, _, // 125 |
+ X, _, _, _, _, // 126 |
+ X, _, _, _, _, // 127 |
+ 1, 7, _, _, _, // 128 |
+ 2, 0, 7, _, _, // 129 |
+ 2, 1, 7, _, _, // 130 |
+ X, _, _, _, _, // 131 |
+ 2, 2, 7, _, _, // 132 |
+ 3, 0, 2, 7, _, // 133 |
+ X, _, _, _, _, // 134 |
+ X, _, _, _, _, // 135 |
+ 2, 3, 7, _, _, // 136 |
+ 3, 0, 3, 7, _, // 137 |
+ 3, 1, 3, 7, _, // 138 |
+ X, _, _, _, _, // 139 |
+ X, _, _, _, _, // 140 |
+ X, _, _, _, _, // 141 |
+ X, _, _, _, _, // 142 |
+ X, _, _, _, _, // 143 |
+ 2, 4, 7, _, _, // 144 |
+ 3, 0, 4, 7, _, // 145 |
+ 3, 1, 4, 7, _, // 146 |
+ X, _, _, _, _, // 147 |
+ 3, 2, 4, 7, _, // 148 |
+ 4, 0, 2, 4, 7, // 149 |
+ X, _, _, _, _, // 150 |
+ X, _, _, _, _, // 151 |
+ X, _, _, _, _, // 152 |
+ X, _, _, _, _, // 153 |
+ X, _, _, _, _, // 154 |
+ X, _, _, _, _, // 155 |
+ X, _, _, _, _, // 156 |
+ X, _, _, _, _, // 157 |
+ X, _, _, _, _, // 158 |
+ X, _, _, _, _, // 159 |
+ 2, 5, 7, _, _, // 160 |
+ 3, 0, 5, 7, _, // 161 |
+ 3, 1, 5, 7, _, // 162 |
+ X, _, _, _, _, // 163 |
+ 3, 2, 5, 7, _, // 164 |
+ 4, 0, 2, 5, 7, // 165 |
+ X, _, _, _, _, // 166 |
+ X, _, _, _, _, // 167 |
+ 3, 3, 5, 7, _, // 168 |
+ 4, 0, 3, 5, 7, // 169 |
+ 4, 1, 3, 5, 7 // 170 |
+}; |
+#undef _ |
+#undef X |
- // Reset map pointer. The meta map object may not be copied yet so |
- // Map::cast does not yet work. |
- obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr))); |
- Address old_addr = obj->address(); |
+// Takes a word of mark bits and returns the number of objects that start in
+// the range. Stores the word offsets of those object starts in the supplied
+// array.
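+// E.g. mark_bits == 0x15 (bits 0, 2 and 4 set) stores {0, 2, 4} in starts
+// and returns 3.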
+static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts) { |
+ int objects = 0; |
+ int offset = 0; |
- if (new_addr != old_addr) { |
- // Move contents. |
- heap()->MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr, |
- old_addr, |
- Map::kSize); |
- } |
+ // No consecutive 1 bits. |
+ ASSERT((mark_bits & 0x180) != 0x180); |
+ ASSERT((mark_bits & 0x18000) != 0x18000); |
+ ASSERT((mark_bits & 0x1800000) != 0x1800000); |
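+ // (These check the bit pairs that straddle the byte boundaries of the
+ // table lookup below: bits 7/8, 15/16 and 23/24.)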
-#ifdef DEBUG |
- if (FLAG_gc_verbose) { |
- PrintF("relocate %p -> %p\n", old_addr, new_addr); |
+ while (mark_bits != 0) { |
+ int byte = (mark_bits & 0xff); |
+ mark_bits >>= 8; |
+ if (byte != 0) { |
+ ASSERT(byte < kStartTableLines); // No consecutive 1 bits. |
+ char* table = kStartTable + byte * kStartTableEntriesPerLine; |
+ int objects_in_these_8_words = table[0]; |
+ ASSERT(objects_in_these_8_words != kStartTableInvalidLine); |
+ ASSERT(objects_in_these_8_words < kStartTableEntriesPerLine); |
+ for (int i = 0; i < objects_in_these_8_words; i++) { |
+ starts[objects++] = offset + table[1 + i]; |
+ } |
+ } |
+ offset += 8; |
} |
-#endif |
- |
- return Map::kSize; |
+ return objects; |
} |
-static inline int RestoreMap(HeapObject* obj, |
- PagedSpace* space, |
- Address new_addr, |
- Address map_addr) { |
- // This must be a non-map object, and the function relies on the |
- // assumption that the Map space is compacted before the other paged |
- // spaces (see RelocateObjects). |
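+// Given the address of the last 32-word block that contained a live object
+// before a free region, and that block's mark-bit cell, computes the exact
+// start of the free region, i.e. the address just past the end of the last
+// live object.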
+static inline Address DigestFreeStart(Address approximate_free_start, |
+ uint32_t free_start_cell) { |
+ ASSERT(free_start_cell != 0); |
- // Reset map pointer. |
- obj->set_map(Map::cast(HeapObject::FromAddress(map_addr))); |
+ int offsets[16]; |
+ uint32_t cell = free_start_cell; |
+ int offset_of_last_live; |
+ if ((cell & 0x80000000u) != 0) { |
+ // This case would overflow below. |
+ offset_of_last_live = 31; |
+ } else { |
+ // Remove all but one bit, the most significant. This is an optimization |
+ // that may or may not be worthwhile. |
+ cell |= cell >> 16; |
+ cell |= cell >> 8; |
+ cell |= cell >> 4; |
+ cell |= cell >> 2; |
+ cell |= cell >> 1; |
+ cell = (cell + 1) >> 1; |
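+ // E.g. cell == 0b101000 smears to 0b111111; adding one and shifting
+ // right leaves 0b100000, the most significant set bit.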
+ int live_objects = MarkWordToObjectStarts(cell, offsets); |
+ ASSERT(live_objects == 1); |
+ offset_of_last_live = offsets[live_objects - 1]; |
+ } |
+ Address last_live_start = |
+ approximate_free_start + offset_of_last_live * kPointerSize; |
+ HeapObject* last_live = HeapObject::FromAddress(last_live_start); |
+ Address free_start = last_live_start + last_live->Size(); |
+ return free_start; |
+} |
- int obj_size = obj->Size(); |
- ASSERT_OBJECT_SIZE(obj_size); |
- ASSERT(space->MCSpaceOffsetForAddress(new_addr) <= |
- space->MCSpaceOffsetForAddress(obj->address())); |
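+// Given a 32-word block address and its mark-bit cell, returns the address
+// at which the first live object in the block starts.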
+static inline Address StartOfLiveObject(Address block_address, uint32_t cell) { |
+ ASSERT(cell != 0); |
-#ifdef DEBUG |
- if (FLAG_gc_verbose) { |
- PrintF("relocate %p -> %p\n", obj->address(), new_addr); |
+ int offsets[16]; |
+ if (cell == 0x80000000u) { // Avoid overflow below. |
+ return block_address + 31 * kPointerSize; |
} |
-#endif |
- |
- return obj_size; |
+ uint32_t first_set_bit = ((cell ^ (cell - 1)) + 1) >> 1; |
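+ // E.g. cell == 0b01100: (cell ^ (cell - 1)) == 0b00111, so first_set_bit
+ // becomes 0b00100, the lowest set bit of the cell.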
+ ASSERT((first_set_bit & cell) == first_set_bit); |
+ int live_objects = MarkWordToObjectStarts(first_set_bit, offsets); |
+ ASSERT(live_objects == 1); |
+ USE(live_objects); |
+ return block_address + offsets[0] * kPointerSize; |
} |
-int MarkCompactCollector::RelocateOldNonCodeObject(HeapObject* obj, |
- PagedSpace* space) { |
- // Recover map pointer. |
- MapWord encoding = obj->map_word(); |
- Address map_addr = encoding.DecodeMapAddress(heap()->map_space()); |
- ASSERT(heap()->map_space()->Contains(map_addr)); |
+// Sweeps a space conservatively. After this has been done the larger free |
+// spaces have been put on the free list and the smaller ones have been |
+// ignored and left untouched. A free space is always either ignored or put |
+// on the free list, never split up into two parts. This is important |
+// because it means that any FreeSpace maps left actually describe a region of |
+// memory that can be ignored when scanning. Dead objects other than free |
+// spaces will not contain the free space map. |
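+// A free region is only digested into an exact address and put on the free
+// list once it (approximately) exceeds 32 words, i.e. one mark-bit cell;
+// smaller gaps are skipped.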
+intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) { |
+ ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept()); |
+ MarkBit::CellType* cells = p->markbits()->cells(); |
+ p->MarkSweptConservatively(); |
- // Get forwarding address before resetting map pointer. |
- Address new_addr = GetForwardingAddressInOldSpace(obj); |
+ int last_cell_index = |
+ Bitmap::IndexToCell( |
+ Bitmap::CellAlignIndex( |
+ p->AddressToMarkbitIndex(p->ObjectAreaEnd()))); |
- // Reset the map pointer. |
- int obj_size = RestoreMap(obj, space, new_addr, map_addr); |
+ int cell_index = Page::kFirstUsedCell; |
+ intptr_t freed_bytes = 0; |
- Address old_addr = obj->address(); |
+ // This is the start of the 32 word block that we are currently looking at. |
+ Address block_address = p->ObjectAreaStart(); |
- if (new_addr != old_addr) { |
- // Move contents. |
- if (space == heap()->old_data_space()) { |
- heap()->MoveBlock(new_addr, old_addr, obj_size); |
- } else { |
- heap()->MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr, |
- old_addr, |
- obj_size); |
+ // Skip over all the dead objects at the start of the page and mark them free. |
+ for (cell_index = Page::kFirstUsedCell; |
+ cell_index < last_cell_index; |
+ cell_index++, block_address += 32 * kPointerSize) { |
+ if (cells[cell_index] != 0) break; |
+ } |
+ size_t size = block_address - p->ObjectAreaStart(); |
+ if (cell_index == last_cell_index) { |
+ freed_bytes += static_cast<int>(space->Free(p->ObjectAreaStart(), |
+ static_cast<int>(size))); |
+ return freed_bytes; |
+ } |
+ // Grow the size of the start-of-page free space a little to get up to the |
+ // first live object. |
+ Address free_end = StartOfLiveObject(block_address, cells[cell_index]); |
+ // Free the first free space. |
+ size = free_end - p->ObjectAreaStart(); |
+ freed_bytes += space->Free(p->ObjectAreaStart(), |
+ static_cast<int>(size)); |
+ // The start of the current free area is represented in undigested form by |
+ // the address of the last 32-word section that contained a live object and |
+ // the marking bitmap for that cell, which describes where the live object |
+ // started. Unless we find a large free space in the bitmap we will not |
+ // digest this pair into a real address. We start the iteration here at the |
+ // first word in the marking bit map that indicates a live object. |
+ Address free_start = block_address; |
+ uint32_t free_start_cell = cells[cell_index]; |
+ |
+ for ( ; |
+ cell_index < last_cell_index; |
+ cell_index++, block_address += 32 * kPointerSize) { |
+ ASSERT(static_cast<unsigned>(cell_index) ==
+ Bitmap::IndexToCell(
+ Bitmap::CellAlignIndex(
+ p->AddressToMarkbitIndex(block_address))));
+ uint32_t cell = cells[cell_index]; |
+ if (cell != 0) { |
+ // We have a live object. Check approximately whether it is more than 32 |
+ // words since the last live object. |
+ if (block_address - free_start > 32 * kPointerSize) { |
+ free_start = DigestFreeStart(free_start, free_start_cell); |
+ if (block_address - free_start > 32 * kPointerSize) { |
+ // Now that we know the exact start of the free space it still looks
+ // large enough to be worth bothering with, so find the start of the
+ // first live object at the end of the free space.
+ free_end = StartOfLiveObject(block_address, cell); |
+ freed_bytes += space->Free(free_start, |
+ static_cast<int>(free_end - free_start)); |
+ } |
+ } |
+ // Update our undigested record of where the current free area started. |
+ free_start = block_address; |
+ free_start_cell = cell; |
+ // Clear marking bits for current cell. |
+ cells[cell_index] = 0; |
} |
} |
- ASSERT(!HeapObject::FromAddress(new_addr)->IsCode()); |
- |
- HeapObject* copied_to = HeapObject::FromAddress(new_addr); |
- if (copied_to->IsSharedFunctionInfo()) { |
- PROFILE(heap()->isolate(), |
- SharedFunctionInfoMoveEvent(old_addr, new_addr)); |
+ // Handle the free space at the end of the page. |
+ if (block_address - free_start > 32 * kPointerSize) { |
+ free_start = DigestFreeStart(free_start, free_start_cell); |
+ freed_bytes += space->Free(free_start, |
+ static_cast<int>(block_address - free_start)); |
} |
- HEAP_PROFILE(heap(), ObjectMoveEvent(old_addr, new_addr)); |
- return obj_size; |
+ return freed_bytes; |
} |
-int MarkCompactCollector::RelocateOldPointerObject(HeapObject* obj) { |
- return RelocateOldNonCodeObject(obj, heap()->old_pointer_space()); |
-} |
+void MarkCompactCollector::SweepSpace(PagedSpace* space, |
+ SweeperType sweeper) { |
+ space->set_was_swept_conservatively(sweeper == CONSERVATIVE || |
+ sweeper == LAZY_CONSERVATIVE); |
+ space->ClearStats(); |
-int MarkCompactCollector::RelocateOldDataObject(HeapObject* obj) { |
- return RelocateOldNonCodeObject(obj, heap()->old_data_space()); |
-} |
+ PageIterator it(space); |
+ intptr_t freed_bytes = 0; |
+ intptr_t newspace_size = space->heap()->new_space()->Size(); |
+ bool lazy_sweeping_active = false; |
-int MarkCompactCollector::RelocateCellObject(HeapObject* obj) { |
- return RelocateOldNonCodeObject(obj, heap()->cell_space()); |
-} |
+ while (it.has_next()) { |
+ Page* p = it.next(); |
+ // Clear the sweeping flags to indicate that the mark bits are still intact.
+ p->ClearSweptPrecisely(); |
+ p->ClearSweptConservatively(); |
-int MarkCompactCollector::RelocateCodeObject(HeapObject* obj) { |
- // Recover map pointer. |
- MapWord encoding = obj->map_word(); |
- Address map_addr = encoding.DecodeMapAddress(heap()->map_space()); |
- ASSERT(heap()->map_space()->Contains(HeapObject::FromAddress(map_addr))); |
+ if (p->IsEvacuationCandidate()) { |
+ ASSERT(evacuation_candidates_.length() > 0); |
+ continue; |
+ } |
- // Get forwarding address before resetting map pointer |
- Address new_addr = GetForwardingAddressInOldSpace(obj); |
+ if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) { |
+ // Will be processed in EvacuateNewSpaceAndCandidates. |
+ continue; |
+ } |
- // Reset the map pointer. |
- int obj_size = RestoreMap(obj, heap()->code_space(), new_addr, map_addr); |
+ if (lazy_sweeping_active) { |
+ if (FLAG_gc_verbose) { |
+ PrintF("Sweeping 0x%" V8PRIxPTR " lazily postponed.\n", |
+ reinterpret_cast<intptr_t>(p)); |
+ } |
+ continue; |
+ } |
- Address old_addr = obj->address(); |
+ if (FLAG_gc_verbose) { |
+ PrintF("Sweeping 0x%" V8PRIxPTR " with sweeper %d.\n", |
+ reinterpret_cast<intptr_t>(p), |
+ sweeper); |
+ } |
- if (new_addr != old_addr) { |
- // Move contents. |
- heap()->MoveBlock(new_addr, old_addr, obj_size); |
+ switch (sweeper) { |
+ case CONSERVATIVE: { |
+ SweepConservatively(space, p); |
+ break; |
+ } |
+ case LAZY_CONSERVATIVE: { |
+ freed_bytes += SweepConservatively(space, p); |
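+ // Once at least a new-space worth of memory has been freed there is
+ // room for the survivors of the next scavenge; leave the remaining
+ // pages to the lazy sweeper.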
+ if (freed_bytes >= newspace_size && p != space->LastPage()) { |
+ space->SetPagesToSweep(p->next_page(), space->LastPage()); |
+ lazy_sweeping_active = true; |
+ } |
+ break; |
+ } |
+ case PRECISE: { |
+ if (space->identity() == CODE_SPACE) { |
+ SweepPrecisely<REBUILD_SKIP_LIST>(space, p, SWEEP_ONLY, NULL); |
+ } else { |
+ SweepPrecisely<IGNORE_SKIP_LIST>(space, p, SWEEP_ONLY, NULL); |
+ } |
+ break; |
+ } |
+ default: { |
+ UNREACHABLE(); |
+ } |
+ } |
} |
- |
- HeapObject* copied_to = HeapObject::FromAddress(new_addr); |
- if (copied_to->IsCode()) { |
- // May also update inline cache target. |
- Code::cast(copied_to)->Relocate(new_addr - old_addr); |
- // Notify the logger that compiled code has moved. |
- PROFILE(heap()->isolate(), CodeMoveEvent(old_addr, new_addr)); |
- } |
- HEAP_PROFILE(heap(), ObjectMoveEvent(old_addr, new_addr)); |
- |
- return obj_size; |
} |
-int MarkCompactCollector::RelocateNewObject(HeapObject* obj) { |
- int obj_size = obj->Size(); |
- |
- // Get forwarding address |
- Address old_addr = obj->address(); |
- int offset = heap()->new_space()->ToSpaceOffsetForAddress(old_addr); |
- |
- Address new_addr = |
- Memory::Address_at(heap()->new_space()->FromSpaceLow() + offset); |
- |
+void MarkCompactCollector::SweepSpaces() { |
+ GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP); |
#ifdef DEBUG |
- if (heap()->new_space()->FromSpaceContains(new_addr)) { |
- ASSERT(heap()->new_space()->FromSpaceOffsetForAddress(new_addr) <= |
- heap()->new_space()->ToSpaceOffsetForAddress(old_addr)); |
- } else { |
- ASSERT(heap()->TargetSpace(obj) == heap()->old_pointer_space() || |
- heap()->TargetSpace(obj) == heap()->old_data_space()); |
- } |
+ state_ = SWEEP_SPACES; |
#endif |
- |
- // New and old addresses cannot overlap. |
- if (heap()->InNewSpace(HeapObject::FromAddress(new_addr))) { |
- heap()->CopyBlock(new_addr, old_addr, obj_size); |
- } else { |
- heap()->CopyBlockToOldSpaceAndUpdateRegionMarks(new_addr, |
- old_addr, |
- obj_size); |
+ SweeperType how_to_sweep = |
+ FLAG_lazy_sweeping ? LAZY_CONSERVATIVE : CONSERVATIVE; |
+ if (sweep_precisely_) how_to_sweep = PRECISE; |
+ // Noncompacting collections simply sweep the spaces to clear the mark |
+ // bits and free the nonlive blocks (for old and map spaces). We sweep |
+ // the map space last because freeing non-live maps overwrites them and |
+ // the other spaces rely on possibly non-live maps to get the sizes for |
+ // non-live objects. |
+ SweepSpace(heap()->old_pointer_space(), how_to_sweep); |
+ SweepSpace(heap()->old_data_space(), how_to_sweep); |
+ SweepSpace(heap()->code_space(), PRECISE); |
+ SweepSpace(heap()->cell_space(), PRECISE); |
+ { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE); |
+ EvacuateNewSpaceAndCandidates(); |
} |
+ // ClearNonLiveTransitions depends on precise sweeping of the map space to
+ // detect whether an unmarked map became dead in this collection or in one
+ // of the previous ones.
+ SweepSpace(heap()->map_space(), PRECISE); |
-#ifdef DEBUG |
- if (FLAG_gc_verbose) { |
- PrintF("relocate %p -> %p\n", old_addr, new_addr); |
- } |
-#endif |
+ ASSERT(live_map_objects_size_ <= heap()->map_space()->Size()); |
- HeapObject* copied_to = HeapObject::FromAddress(new_addr); |
- if (copied_to->IsSharedFunctionInfo()) { |
- PROFILE(heap()->isolate(), |
- SharedFunctionInfoMoveEvent(old_addr, new_addr)); |
- } |
- HEAP_PROFILE(heap(), ObjectMoveEvent(old_addr, new_addr)); |
- |
- return obj_size; |
+ // Deallocate unmarked objects and clear marked bits for marked objects. |
+ heap_->lo_space()->FreeUnmarkedObjects(); |
} |
+// TODO(1466): ReportDeleteIfNeeded is currently not called.
+// Our profiling tools do not expect intersections between
+// code objects. We should either reenable it or change our tools.
void MarkCompactCollector::EnableCodeFlushing(bool enable) { |
if (enable) { |
if (code_flusher_ != NULL) return; |
@@ -3347,17 +3501,126 @@ |
} |
-int MarkCompactCollector::SizeOfMarkedObject(HeapObject* obj) { |
- MapWord map_word = obj->map_word(); |
- map_word.ClearMark(); |
- return obj->SizeFromMap(map_word.ToMap()); |
+void MarkCompactCollector::Initialize() { |
+ StaticMarkingVisitor::Initialize(); |
} |
-void MarkCompactCollector::Initialize() { |
- StaticPointersToNewGenUpdatingVisitor::Initialize(); |
- StaticMarkingVisitor::Initialize(); |
+bool SlotsBuffer::IsTypedSlot(ObjectSlot slot) { |
+ return reinterpret_cast<uintptr_t>(slot) < NUMBER_OF_SLOT_TYPES; |
} |
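+// A typed slot occupies two consecutive entries in the buffer: the slot
+// type (a small integer cast to ObjectSlot) followed by the address it
+// refers to. Ordinary slots hold real heap addresses, which are never
+// numerically smaller than NUMBER_OF_SLOT_TYPES; IsTypedSlot relies on
+// this to tell the two kinds apart.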
+bool SlotsBuffer::AddTo(SlotsBufferAllocator* allocator, |
+ SlotsBuffer** buffer_address, |
+ SlotType type, |
+ Address addr, |
+ AdditionMode mode) { |
+ SlotsBuffer* buffer = *buffer_address; |
+ if (buffer == NULL || !buffer->HasSpaceForTypedSlot()) { |
+ if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) { |
+ allocator->DeallocateChain(buffer_address); |
+ return false; |
+ } |
+ buffer = allocator->AllocateBuffer(buffer); |
+ *buffer_address = buffer; |
+ } |
+ ASSERT(buffer->HasSpaceForTypedSlot()); |
+ buffer->Add(reinterpret_cast<ObjectSlot>(type)); |
+ buffer->Add(reinterpret_cast<ObjectSlot>(addr)); |
+ return true; |
+} |
+ |
+ |
+static inline SlotsBuffer::SlotType SlotTypeForRMode(RelocInfo::Mode rmode) { |
+ if (RelocInfo::IsCodeTarget(rmode)) { |
+ return SlotsBuffer::CODE_TARGET_SLOT; |
+ } else if (RelocInfo::IsDebugBreakSlot(rmode)) { |
+ return SlotsBuffer::DEBUG_TARGET_SLOT; |
+ } else if (RelocInfo::IsJSReturn(rmode)) { |
+ return SlotsBuffer::JS_RETURN_SLOT; |
+ } |
+ UNREACHABLE(); |
+ return SlotsBuffer::NUMBER_OF_SLOT_TYPES; |
+} |
+ |
+ |
+void MarkCompactCollector::RecordRelocSlot(RelocInfo* rinfo, Code* target) { |
+ Page* target_page = Page::FromAddress( |
+ reinterpret_cast<Address>(target)); |
+ if (target_page->IsEvacuationCandidate() && |
+ (rinfo->host() == NULL || |
+ !ShouldSkipEvacuationSlotRecording(rinfo->host()))) { |
+ if (!SlotsBuffer::AddTo(&slots_buffer_allocator_, |
+ target_page->slots_buffer_address(), |
+ SlotTypeForRMode(rinfo->rmode()), |
+ rinfo->pc(), |
+ SlotsBuffer::FAIL_ON_OVERFLOW)) { |
+ EvictEvacuationCandidate(target_page); |
+ } |
+ } |
+} |
+ |
+ |
+void MarkCompactCollector::RecordCodeEntrySlot(Address slot, Code* target) { |
+ Page* target_page = Page::FromAddress( |
+ reinterpret_cast<Address>(target)); |
+ if (target_page->IsEvacuationCandidate() && |
+ !ShouldSkipEvacuationSlotRecording(reinterpret_cast<Object**>(slot))) { |
+ if (!SlotsBuffer::AddTo(&slots_buffer_allocator_, |
+ target_page->slots_buffer_address(), |
+ SlotsBuffer::CODE_ENTRY_SLOT, |
+ slot, |
+ SlotsBuffer::FAIL_ON_OVERFLOW)) { |
+ EvictEvacuationCandidate(target_page); |
+ } |
+ } |
+} |
+ |
+ |
+static inline SlotsBuffer::SlotType DecodeSlotType( |
+ SlotsBuffer::ObjectSlot slot) { |
+ return static_cast<SlotsBuffer::SlotType>(reinterpret_cast<intptr_t>(slot)); |
+} |
+ |
+ |
+void SlotsBuffer::UpdateSlots(Heap* heap) { |
+ PointersUpdatingVisitor v(heap); |
+ |
+ for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) { |
+ ObjectSlot slot = slots_[slot_idx]; |
+ if (!IsTypedSlot(slot)) { |
+ UpdateSlot(slot); |
+ } else { |
+ ++slot_idx; |
+ ASSERT(slot_idx < idx_); |
+ UpdateSlot(&v, |
+ DecodeSlotType(slot), |
+ reinterpret_cast<Address>(slots_[slot_idx])); |
+ } |
+ } |
+} |
+ |
+ |
+SlotsBuffer* SlotsBufferAllocator::AllocateBuffer(SlotsBuffer* next_buffer) { |
+ return new SlotsBuffer(next_buffer); |
+} |
+ |
+ |
+void SlotsBufferAllocator::DeallocateBuffer(SlotsBuffer* buffer) { |
+ delete buffer; |
+} |
+ |
+ |
+void SlotsBufferAllocator::DeallocateChain(SlotsBuffer** buffer_address) { |
+ SlotsBuffer* buffer = *buffer_address; |
+ while (buffer != NULL) { |
+ SlotsBuffer* next_buffer = buffer->next(); |
+ DeallocateBuffer(buffer); |
+ buffer = next_buffer; |
+ } |
+ *buffer_address = NULL; |
+} |
+ |
+ |
} } // namespace v8::internal |