Index: src/heap/mark-compact.cc |
diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc |
index 917e87d75f3221828086686fac15de5e86e391a1..122de1d87dd241ba00e5bffc371590b18d1d6418 100644 |
--- a/src/heap/mark-compact.cc |
+++ b/src/heap/mark-compact.cc |
@@ -984,1150 +984,18 @@ |
} |
-void CodeFlusher::EvictCandidate(SharedFunctionInfo* shared_info) { |
- // Make sure previous flushing decisions are revisited. |
- isolate_->heap()->incremental_marking()->RecordWrites(shared_info); |
- |
- if (FLAG_trace_code_flushing) { |
- PrintF("[code-flushing abandons function-info: "); |
- shared_info->ShortPrint(); |
- PrintF("]\n"); |
- } |
- |
- SharedFunctionInfo* candidate = shared_function_info_candidates_head_; |
- SharedFunctionInfo* next_candidate; |
- if (candidate == shared_info) { |
- next_candidate = GetNextCandidate(shared_info); |
- shared_function_info_candidates_head_ = next_candidate; |
- ClearNextCandidate(shared_info); |
- } else { |
- while (candidate != NULL) { |
- next_candidate = GetNextCandidate(candidate); |
- |
- if (next_candidate == shared_info) { |
- next_candidate = GetNextCandidate(shared_info); |
- SetNextCandidate(candidate, next_candidate); |
- ClearNextCandidate(shared_info); |
- break; |
- } |
- |
- candidate = next_candidate; |
- } |
- } |
-} |
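
A minimal standalone sketch of the unlink logic above, assuming a hypothetical Node type in place of V8's SharedFunctionInfo. The candidate list is threaded through the objects themselves (each node stores the "next" pointer in one of its own fields), so removal handles the head and interior cases separately:

struct Node {
  Node* next = nullptr;
};

void Evict(Node** head, Node* target) {
  Node* candidate = *head;
  if (candidate == target) {
    // Head case: advance the list head past the target.
    *head = target->next;
    target->next = nullptr;
    return;
  }
  // Interior case: walk until the predecessor of the target is found.
  while (candidate != nullptr) {
    if (candidate->next == target) {
      candidate->next = target->next;
      target->next = nullptr;
      return;
    }
    candidate = candidate->next;
  }
}
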
- |
- |
-void CodeFlusher::EvictCandidate(JSFunction* function) { |
- DCHECK(!function->next_function_link()->IsUndefined()); |
- Object* undefined = isolate_->heap()->undefined_value(); |
- |
- // Make sure previous flushing decisions are revisited. |
- isolate_->heap()->incremental_marking()->RecordWrites(function); |
- isolate_->heap()->incremental_marking()->RecordWrites(function->shared()); |
- |
- if (FLAG_trace_code_flushing) { |
- PrintF("[code-flushing abandons closure: "); |
- function->shared()->ShortPrint(); |
- PrintF("]\n"); |
- } |
- |
- JSFunction* candidate = jsfunction_candidates_head_; |
- JSFunction* next_candidate; |
- if (candidate == function) { |
- next_candidate = GetNextCandidate(function); |
- jsfunction_candidates_head_ = next_candidate; |
- ClearNextCandidate(function, undefined); |
- } else { |
- while (candidate != NULL) { |
- next_candidate = GetNextCandidate(candidate); |
- |
- if (next_candidate == function) { |
- next_candidate = GetNextCandidate(function); |
- SetNextCandidate(candidate, next_candidate); |
- ClearNextCandidate(function, undefined); |
- break; |
- } |
- |
- candidate = next_candidate; |
- } |
- } |
-} |
- |
- |
-void CodeFlusher::EvictJSFunctionCandidates() { |
- JSFunction* candidate = jsfunction_candidates_head_; |
- JSFunction* next_candidate; |
- while (candidate != NULL) { |
- next_candidate = GetNextCandidate(candidate); |
- EvictCandidate(candidate); |
- candidate = next_candidate; |
- } |
- DCHECK(jsfunction_candidates_head_ == NULL); |
-} |
- |
- |
-void CodeFlusher::EvictSharedFunctionInfoCandidates() { |
- SharedFunctionInfo* candidate = shared_function_info_candidates_head_; |
- SharedFunctionInfo* next_candidate; |
- while (candidate != NULL) { |
- next_candidate = GetNextCandidate(candidate); |
- EvictCandidate(candidate); |
- candidate = next_candidate; |
- } |
- DCHECK(shared_function_info_candidates_head_ == NULL); |
-} |
- |
- |
-void CodeFlusher::IteratePointersToFromSpace(ObjectVisitor* v) { |
- Heap* heap = isolate_->heap(); |
- |
- JSFunction** slot = &jsfunction_candidates_head_; |
- JSFunction* candidate = jsfunction_candidates_head_; |
- while (candidate != NULL) { |
- if (heap->InFromSpace(candidate)) { |
- v->VisitPointer(reinterpret_cast<Object**>(slot)); |
- } |
- candidate = GetNextCandidate(*slot); |
- slot = GetNextCandidateSlot(*slot); |
- } |
-} |
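
IteratePointersToFromSpace above walks the candidate list by slot address rather than by node, so the visitor may rewrite a pointer in place (for example, when a candidate object has been moved out of from-space). A minimal sketch of the same slot-walking pattern, with hypothetical types:

struct Candidate {
  Candidate* next = nullptr;
};

void VisitSlots(Candidate** head, void (*visit)(Candidate** slot)) {
  Candidate** slot = head;
  while (*slot != nullptr) {
    visit(slot);            // may overwrite *slot with a forwarded address
    slot = &(*slot)->next;  // re-read *slot, then advance to the next slot
  }
}
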
- |
- |
-MarkCompactCollector::~MarkCompactCollector() { |
- if (code_flusher_ != NULL) { |
- delete code_flusher_; |
- code_flusher_ = NULL; |
- } |
-} |
- |
- |
-class MarkCompactMarkingVisitor |
- : public StaticMarkingVisitor<MarkCompactMarkingVisitor> { |
- public: |
- static void Initialize(); |
- |
- INLINE(static void VisitPointer(Heap* heap, HeapObject* object, Object** p)) { |
- MarkObjectByPointer(heap->mark_compact_collector(), object, p); |
- } |
- |
- INLINE(static void VisitPointers(Heap* heap, HeapObject* object, |
- Object** start, Object** end)) { |
- // Mark all objects pointed to in [start, end). |
- const int kMinRangeForMarkingRecursion = 64; |
- if (end - start >= kMinRangeForMarkingRecursion) { |
- if (VisitUnmarkedObjects(heap, object, start, end)) return; |
- // We are close to a stack overflow, so just mark the objects. |
- } |
- MarkCompactCollector* collector = heap->mark_compact_collector(); |
- for (Object** p = start; p < end; p++) { |
- MarkObjectByPointer(collector, object, p); |
- } |
- } |
- |
- // Marks the object black and pushes it on the marking stack. |
- INLINE(static void MarkObject(Heap* heap, HeapObject* object)) { |
- MarkBit mark = Marking::MarkBitFrom(object); |
- heap->mark_compact_collector()->MarkObject(object, mark); |
- } |
- |
- // Marks the object black without pushing it on the marking stack. |
- // Returns true if object needed marking and false otherwise. |
- INLINE(static bool MarkObjectWithoutPush(Heap* heap, HeapObject* object)) { |
- MarkBit mark_bit = Marking::MarkBitFrom(object); |
- if (Marking::IsWhite(mark_bit)) { |
- heap->mark_compact_collector()->SetMark(object, mark_bit); |
- return true; |
- } |
- return false; |
- } |
- |
- // Mark object pointed to by p. |
- INLINE(static void MarkObjectByPointer(MarkCompactCollector* collector, |
- HeapObject* object, Object** p)) { |
- if (!(*p)->IsHeapObject()) return; |
- HeapObject* target_object = HeapObject::cast(*p); |
- collector->RecordSlot(object, p, target_object); |
- MarkBit mark = Marking::MarkBitFrom(target_object); |
- collector->MarkObject(target_object, mark); |
- } |
- |
- |
- // Visit an unmarked object. |
- INLINE(static void VisitUnmarkedObject(MarkCompactCollector* collector, |
- HeapObject* obj)) { |
-#ifdef DEBUG |
- DCHECK(collector->heap()->Contains(obj)); |
- DCHECK(!collector->heap()->mark_compact_collector()->IsMarked(obj)); |
-#endif |
- Map* map = obj->map(); |
- Heap* heap = obj->GetHeap(); |
- MarkBit mark = Marking::MarkBitFrom(obj); |
- heap->mark_compact_collector()->SetMark(obj, mark); |
- // Mark the map pointer and the body. |
- MarkBit map_mark = Marking::MarkBitFrom(map); |
- heap->mark_compact_collector()->MarkObject(map, map_mark); |
- IterateBody(map, obj); |
- } |
- |
- // Visit all unmarked objects pointed to by [start, end). |
- // Returns false if the operation fails (lack of stack space). |
- INLINE(static bool VisitUnmarkedObjects(Heap* heap, HeapObject* object, |
- Object** start, Object** end)) { |
-    // Return false if we are close to the stack limit. |
- StackLimitCheck check(heap->isolate()); |
- if (check.HasOverflowed()) return false; |
- |
- MarkCompactCollector* collector = heap->mark_compact_collector(); |
- // Visit the unmarked objects. |
- for (Object** p = start; p < end; p++) { |
- Object* o = *p; |
- if (!o->IsHeapObject()) continue; |
- collector->RecordSlot(object, p, o); |
- HeapObject* obj = HeapObject::cast(o); |
- MarkBit mark = Marking::MarkBitFrom(obj); |
- if (Marking::IsBlackOrGrey(mark)) continue; |
- VisitUnmarkedObject(collector, obj); |
- } |
- return true; |
- } |
- |
- private: |
- template <int id> |
- static inline void TrackObjectStatsAndVisit(Map* map, HeapObject* obj); |
- |
- // Code flushing support. |
- |
- static const int kRegExpCodeThreshold = 5; |
- |
- static void UpdateRegExpCodeAgeAndFlush(Heap* heap, JSRegExp* re, |
- bool is_one_byte) { |
- // Make sure that the fixed array is in fact initialized on the RegExp. |
- // We could potentially trigger a GC when initializing the RegExp. |
- if (HeapObject::cast(re->data())->map()->instance_type() != |
- FIXED_ARRAY_TYPE) |
- return; |
- |
- // Make sure this is a RegExp that actually contains code. |
- if (re->TypeTag() != JSRegExp::IRREGEXP) return; |
- |
- Object* code = re->DataAt(JSRegExp::code_index(is_one_byte)); |
- if (!code->IsSmi() && |
- HeapObject::cast(code)->map()->instance_type() == CODE_TYPE) { |
- // Save a copy that can be reinstated if we need the code again. |
- re->SetDataAt(JSRegExp::saved_code_index(is_one_byte), code); |
- |
-      // Saving a copy might create a pointer into a compaction candidate |
-      // that was not observed by the marker. This might happen if the |
-      // JSRegExp data was marked through the compilation cache before the |
-      // marker reached the JSRegExp object. |
- FixedArray* data = FixedArray::cast(re->data()); |
- Object** slot = |
- data->data_start() + JSRegExp::saved_code_index(is_one_byte); |
- heap->mark_compact_collector()->RecordSlot(data, slot, code); |
- |
- // Set a number in the 0-255 range to guarantee no smi overflow. |
- re->SetDataAt(JSRegExp::code_index(is_one_byte), |
- Smi::FromInt(heap->ms_count() & 0xff)); |
- } else if (code->IsSmi()) { |
- int value = Smi::cast(code)->value(); |
- // The regexp has not been compiled yet or there was a compilation error. |
- if (value == JSRegExp::kUninitializedValue || |
- value == JSRegExp::kCompilationErrorValue) { |
- return; |
- } |
- |
- // Check if we should flush now. |
- if (value == ((heap->ms_count() - kRegExpCodeThreshold) & 0xff)) { |
- re->SetDataAt(JSRegExp::code_index(is_one_byte), |
- Smi::FromInt(JSRegExp::kUninitializedValue)); |
- re->SetDataAt(JSRegExp::saved_code_index(is_one_byte), |
- Smi::FromInt(JSRegExp::kUninitializedValue)); |
- } |
- } |
- } |
- |
- |
-  // Works by storing the current sweep generation (as a smi) in the |
-  // code-object slot of the RegExp's data array, while keeping a copy |
-  // around that can be reinstated if the RegExp is reused before flushing. |
-  // If the code has not been used for kRegExpCodeThreshold mark-sweep GCs, |
-  // it is flushed. |
- static void VisitRegExpAndFlushCode(Map* map, HeapObject* object) { |
- Heap* heap = map->GetHeap(); |
- MarkCompactCollector* collector = heap->mark_compact_collector(); |
- if (!collector->is_code_flushing_enabled()) { |
- VisitJSRegExp(map, object); |
- return; |
- } |
- JSRegExp* re = reinterpret_cast<JSRegExp*>(object); |
- // Flush code or set age on both one byte and two byte code. |
- UpdateRegExpCodeAgeAndFlush(heap, re, true); |
- UpdateRegExpCodeAgeAndFlush(heap, re, false); |
- // Visit the fields of the RegExp, including the updated FixedArray. |
- VisitJSRegExp(map, object); |
- } |
-}; |
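
The aging check in UpdateRegExpCodeAgeAndFlush above is plain modular arithmetic: the mark-sweep count at last use is stored modulo 256 (keeping it in the smi-safe 0-255 range), and the code is flushed once the current count is exactly kRegExpCodeThreshold GCs past it. A small self-contained sketch of that check; the loop values are illustrative only:

#include <cstdio>

const int kRegExpCodeThreshold = 5;

// Flush once the current mark-sweep count is exactly kRegExpCodeThreshold
// past the stored last-use count, with all arithmetic taken modulo 256.
bool ShouldFlush(int stored_age, int current_ms_count) {
  return stored_age == ((current_ms_count - kRegExpCodeThreshold) & 0xff);
}

int main() {
  int last_use = 252 & 0xff;  // code last used at GC #252
  for (int gc = 252; gc <= 258; gc++) {
    // Becomes true exactly at GC 257; the & 0xff keeps the comparison
    // working even though counts past 255 wrap when stored.
    printf("GC %d: flush=%d\n", gc, ShouldFlush(last_use, gc));
  }
}
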
- |
- |
-void MarkCompactMarkingVisitor::Initialize() { |
- StaticMarkingVisitor<MarkCompactMarkingVisitor>::Initialize(); |
- |
- table_.Register(kVisitJSRegExp, &VisitRegExpAndFlushCode); |
- |
- if (FLAG_track_gc_object_stats) { |
- ObjectStatsVisitor::Initialize(&table_); |
- } |
-} |
- |
- |
-class CodeMarkingVisitor : public ThreadVisitor { |
- public: |
- explicit CodeMarkingVisitor(MarkCompactCollector* collector) |
- : collector_(collector) {} |
- |
- void VisitThread(Isolate* isolate, ThreadLocalTop* top) { |
- collector_->PrepareThreadForCodeFlushing(isolate, top); |
- } |
- |
- private: |
- MarkCompactCollector* collector_; |
-}; |
- |
- |
-class SharedFunctionInfoMarkingVisitor : public ObjectVisitor { |
- public: |
- explicit SharedFunctionInfoMarkingVisitor(MarkCompactCollector* collector) |
- : collector_(collector) {} |
- |
- void VisitPointers(Object** start, Object** end) override { |
- for (Object** p = start; p < end; p++) VisitPointer(p); |
- } |
- |
- void VisitPointer(Object** slot) override { |
- Object* obj = *slot; |
- if (obj->IsSharedFunctionInfo()) { |
- SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj); |
- MarkBit shared_mark = Marking::MarkBitFrom(shared); |
- MarkBit code_mark = Marking::MarkBitFrom(shared->code()); |
- collector_->MarkObject(shared->code(), code_mark); |
- collector_->MarkObject(shared, shared_mark); |
- } |
- } |
- |
- private: |
- MarkCompactCollector* collector_; |
-}; |
- |
- |
-void MarkCompactCollector::PrepareThreadForCodeFlushing(Isolate* isolate, |
- ThreadLocalTop* top) { |
- for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) { |
-    // Note: for a frame that has a pending lazy deoptimization, |
-    // StackFrame::unchecked_code will return a non-optimized code object for |
-    // the outermost function, while StackFrame::LookupCode will return the |
-    // actual optimized code object. |
- StackFrame* frame = it.frame(); |
- Code* code = frame->unchecked_code(); |
- MarkBit code_mark = Marking::MarkBitFrom(code); |
- MarkObject(code, code_mark); |
- if (frame->is_optimized()) { |
- MarkCompactMarkingVisitor::MarkInlinedFunctionsCode(heap(), |
- frame->LookupCode()); |
- } |
- } |
-} |
- |
- |
-void MarkCompactCollector::PrepareForCodeFlushing() { |
- // If code flushing is disabled, there is no need to prepare for it. |
- if (!is_code_flushing_enabled()) return; |
- |
- // Ensure that empty descriptor array is marked. Method MarkDescriptorArray |
- // relies on it being marked before any other descriptor array. |
- HeapObject* descriptor_array = heap()->empty_descriptor_array(); |
- MarkBit descriptor_array_mark = Marking::MarkBitFrom(descriptor_array); |
- MarkObject(descriptor_array, descriptor_array_mark); |
- |
- // Make sure we are not referencing the code from the stack. |
- DCHECK(this == heap()->mark_compact_collector()); |
- PrepareThreadForCodeFlushing(heap()->isolate(), |
- heap()->isolate()->thread_local_top()); |
- |
- // Iterate the archived stacks in all threads to check if |
- // the code is referenced. |
- CodeMarkingVisitor code_marking_visitor(this); |
- heap()->isolate()->thread_manager()->IterateArchivedThreads( |
- &code_marking_visitor); |
- |
- SharedFunctionInfoMarkingVisitor visitor(this); |
- heap()->isolate()->compilation_cache()->IterateFunctions(&visitor); |
- heap()->isolate()->handle_scope_implementer()->Iterate(&visitor); |
- |
- ProcessMarkingDeque(); |
-} |
- |
- |
-// Visitor class for marking heap roots. |
-class RootMarkingVisitor : public ObjectVisitor { |
- public: |
- explicit RootMarkingVisitor(Heap* heap) |
- : collector_(heap->mark_compact_collector()) {} |
- |
- void VisitPointer(Object** p) override { MarkObjectByPointer(p); } |
- |
- void VisitPointers(Object** start, Object** end) override { |
- for (Object** p = start; p < end; p++) MarkObjectByPointer(p); |
- } |
- |
- // Skip the weak next code link in a code object, which is visited in |
- // ProcessTopOptimizedFrame. |
- void VisitNextCodeLink(Object** p) override {} |
- |
- private: |
- void MarkObjectByPointer(Object** p) { |
- if (!(*p)->IsHeapObject()) return; |
- |
- // Replace flat cons strings in place. |
- HeapObject* object = HeapObject::cast(*p); |
- MarkBit mark_bit = Marking::MarkBitFrom(object); |
- if (Marking::IsBlackOrGrey(mark_bit)) return; |
- |
- Map* map = object->map(); |
- // Mark the object. |
- collector_->SetMark(object, mark_bit); |
- |
- // Mark the map pointer and body, and push them on the marking stack. |
- MarkBit map_mark = Marking::MarkBitFrom(map); |
- collector_->MarkObject(map, map_mark); |
- MarkCompactMarkingVisitor::IterateBody(map, object); |
- |
- // Mark all the objects reachable from the map and body. May leave |
- // overflowed objects in the heap. |
- collector_->EmptyMarkingDeque(); |
- } |
- |
- MarkCompactCollector* collector_; |
-}; |
- |
- |
-// Helper class for pruning the string table. |
-template <bool finalize_external_strings> |
-class StringTableCleaner : public ObjectVisitor { |
- public: |
- explicit StringTableCleaner(Heap* heap) : heap_(heap), pointers_removed_(0) {} |
- |
- void VisitPointers(Object** start, Object** end) override { |
- // Visit all HeapObject pointers in [start, end). |
- for (Object** p = start; p < end; p++) { |
- Object* o = *p; |
- if (o->IsHeapObject() && |
- Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(o)))) { |
- if (finalize_external_strings) { |
- DCHECK(o->IsExternalString()); |
- heap_->FinalizeExternalString(String::cast(*p)); |
- } else { |
- pointers_removed_++; |
- } |
- // Set the entry to the_hole_value (as deleted). |
- *p = heap_->the_hole_value(); |
- } |
- } |
- } |
- |
- int PointersRemoved() { |
- DCHECK(!finalize_external_strings); |
- return pointers_removed_; |
- } |
- |
- private: |
- Heap* heap_; |
- int pointers_removed_; |
-}; |
- |
- |
-typedef StringTableCleaner<false> InternalizedStringTableCleaner; |
-typedef StringTableCleaner<true> ExternalStringTableCleaner; |
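
The two typedefs above instantiate one template with a compile-time bool, so the dead branch in VisitPointers is resolved statically rather than through a runtime flag or virtual dispatch. A toy sketch of the same pattern, with hypothetical names:

#include <cstdio>

template <bool finalize>
struct DeadEntryHandler {
  int removed = 0;
  void HandleDead(const char* name) {
    if (finalize) {
      printf("finalizing %s\n", name);  // external-string-style cleanup
    } else {
      removed++;  // internalized-string-style counting
    }
  }
};

typedef DeadEntryHandler<false> CountingHandler;
typedef DeadEntryHandler<true> FinalizingHandler;
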
- |
- |
-// Implementation of WeakObjectRetainer for mark compact GCs. All marked objects |
-// are retained. |
-class MarkCompactWeakObjectRetainer : public WeakObjectRetainer { |
- public: |
- virtual Object* RetainAs(Object* object) { |
- if (Marking::IsBlackOrGrey( |
- Marking::MarkBitFrom(HeapObject::cast(object)))) { |
- return object; |
- } else if (object->IsAllocationSite() && |
- !(AllocationSite::cast(object)->IsZombie())) { |
- // "dead" AllocationSites need to live long enough for a traversal of new |
- // space. These sites get a one-time reprieve. |
- AllocationSite* site = AllocationSite::cast(object); |
- site->MarkZombie(); |
- site->GetHeap()->mark_compact_collector()->MarkAllocationSite(site); |
- return object; |
- } else { |
- return NULL; |
- } |
- } |
-}; |
- |
- |
-// Fill the marking stack with overflowed objects returned by the given |
-// iterator. Stop when the marking stack is filled or the end of the space |
-// is reached, whichever comes first. |
-template <class T> |
-void MarkCompactCollector::DiscoverGreyObjectsWithIterator(T* it) { |
- // The caller should ensure that the marking stack is initially not full, |
- // so that we don't waste effort pointlessly scanning for objects. |
- DCHECK(!marking_deque()->IsFull()); |
- |
- Map* filler_map = heap()->one_pointer_filler_map(); |
- for (HeapObject* object = it->Next(); object != NULL; object = it->Next()) { |
- MarkBit markbit = Marking::MarkBitFrom(object); |
- if ((object->map() != filler_map) && Marking::IsGrey(markbit)) { |
- Marking::GreyToBlack(markbit); |
- PushBlack(object); |
- if (marking_deque()->IsFull()) return; |
- } |
- } |
-} |
- |
- |
-static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts); |
- |
- |
-void MarkCompactCollector::DiscoverGreyObjectsOnPage(MemoryChunk* p) { |
- DCHECK(!marking_deque()->IsFull()); |
- DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0); |
- DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0); |
- DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0); |
- DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0); |
- |
- for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) { |
- Address cell_base = it.CurrentCellBase(); |
- MarkBit::CellType* cell = it.CurrentCell(); |
- |
- const MarkBit::CellType current_cell = *cell; |
- if (current_cell == 0) continue; |
- |
- MarkBit::CellType grey_objects; |
- if (it.HasNext()) { |
- const MarkBit::CellType next_cell = *(cell + 1); |
- grey_objects = current_cell & ((current_cell >> 1) | |
- (next_cell << (Bitmap::kBitsPerCell - 1))); |
- } else { |
- grey_objects = current_cell & (current_cell >> 1); |
- } |
- |
- int offset = 0; |
- while (grey_objects != 0) { |
- int trailing_zeros = base::bits::CountTrailingZeros32(grey_objects); |
- grey_objects >>= trailing_zeros; |
- offset += trailing_zeros; |
- MarkBit markbit(cell, 1 << offset); |
- DCHECK(Marking::IsGrey(markbit)); |
- Marking::GreyToBlack(markbit); |
- Address addr = cell_base + offset * kPointerSize; |
- HeapObject* object = HeapObject::FromAddress(addr); |
- PushBlack(object); |
- if (marking_deque()->IsFull()) return; |
- offset += 2; |
- grey_objects >>= 2; |
- } |
- |
- grey_objects >>= (Bitmap::kBitsPerCell - 1); |
- } |
-} |
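
The bitmask above relies on each object's color occupying two consecutive bitmap bits: grey objects have both bits set, black objects only the first (per the bit-pattern DCHECKs). Bit k of grey_objects is therefore set exactly when bits k and k+1 of the cell are both set, with the shifted-in next cell covering an object whose second bit falls in the following cell. A self-contained worked example of the same extraction; __builtin_ctz (a GCC/Clang builtin) stands in for base::bits::CountTrailingZeros32, and the sample bit positions are arbitrary:

#include <cstdint>
#include <cstdio>

int main() {
  // Two adjacent 32-bit bitmap cells. Each object owns two consecutive
  // bits: black sets only the first, grey sets both.
  uint32_t current_cell = 0;
  current_cell |= 1u << 4;                  // black object at word offset 4
  current_cell |= (1u << 10) | (1u << 11);  // grey object at word offset 10
  current_cell |= 1u << 31;                 // grey object at offset 31 ...
  uint32_t next_cell = 1u;                  // ... second bit in next cell

  // Bit k survives iff bits k and k+1 are both set, i.e. object is grey.
  uint32_t grey_objects =
      current_cell & ((current_cell >> 1) | (next_cell << 31));

  int offset = 0;
  while (grey_objects != 0) {
    int trailing_zeros = __builtin_ctz(grey_objects);
    grey_objects >>= trailing_zeros;
    offset += trailing_zeros;
    printf("grey object at word offset %d\n", offset);  // prints 10, then 31
    grey_objects >>= 2;  // skip both mark bits of the object just found
    offset += 2;
  }
}
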
- |
- |
-int MarkCompactCollector::DiscoverAndEvacuateBlackObjectsOnPage( |
- NewSpace* new_space, NewSpacePage* p) { |
- DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0); |
- DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0); |
- DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0); |
- DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0); |
- |
- MarkBit::CellType* cells = p->markbits()->cells(); |
- int survivors_size = 0; |
- |
- for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) { |
- Address cell_base = it.CurrentCellBase(); |
- MarkBit::CellType* cell = it.CurrentCell(); |
- |
- MarkBit::CellType current_cell = *cell; |
- if (current_cell == 0) continue; |
- |
- int offset = 0; |
- while (current_cell != 0) { |
- int trailing_zeros = base::bits::CountTrailingZeros32(current_cell); |
- current_cell >>= trailing_zeros; |
- offset += trailing_zeros; |
- Address address = cell_base + offset * kPointerSize; |
- HeapObject* object = HeapObject::FromAddress(address); |
- DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object))); |
- |
- int size = object->Size(); |
- survivors_size += size; |
- |
- Heap::UpdateAllocationSiteFeedback(object, Heap::RECORD_SCRATCHPAD_SLOT); |
- |
- offset += 2; |
- current_cell >>= 2; |
- |
- // TODO(hpayer): Refactor EvacuateObject and call this function instead. |
- if (heap()->ShouldBePromoted(object->address(), size) && |
- TryPromoteObject(object, size)) { |
- continue; |
- } |
- |
- AllocationAlignment alignment = object->RequiredAlignment(); |
- AllocationResult allocation = new_space->AllocateRaw(size, alignment); |
- if (allocation.IsRetry()) { |
- if (!new_space->AddFreshPage()) { |
- // Shouldn't happen. We are sweeping linearly, and to-space |
- // has the same number of pages as from-space, so there is |
- // always room unless we are in an OOM situation. |
- FatalProcessOutOfMemory("MarkCompactCollector: semi-space copy\n"); |
- } |
- allocation = new_space->AllocateRaw(size, alignment); |
- DCHECK(!allocation.IsRetry()); |
- } |
- Object* target = allocation.ToObjectChecked(); |
- |
- MigrateObject(HeapObject::cast(target), object, size, NEW_SPACE, nullptr); |
- if (V8_UNLIKELY(target->IsJSArrayBuffer())) { |
- heap()->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target)); |
- } |
- heap()->IncrementSemiSpaceCopiedObjectSize(size); |
- } |
- *cells = 0; |
- } |
- return survivors_size; |
-} |
- |
- |
-void MarkCompactCollector::DiscoverGreyObjectsInSpace(PagedSpace* space) { |
- PageIterator it(space); |
- while (it.has_next()) { |
- Page* p = it.next(); |
- DiscoverGreyObjectsOnPage(p); |
- if (marking_deque()->IsFull()) return; |
- } |
-} |
- |
- |
-void MarkCompactCollector::DiscoverGreyObjectsInNewSpace() { |
- NewSpace* space = heap()->new_space(); |
- NewSpacePageIterator it(space->bottom(), space->top()); |
- while (it.has_next()) { |
- NewSpacePage* page = it.next(); |
- DiscoverGreyObjectsOnPage(page); |
- if (marking_deque()->IsFull()) return; |
- } |
-} |
- |
- |
-bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) { |
- Object* o = *p; |
- if (!o->IsHeapObject()) return false; |
- HeapObject* heap_object = HeapObject::cast(o); |
- MarkBit mark = Marking::MarkBitFrom(heap_object); |
- return Marking::IsWhite(mark); |
-} |
- |
- |
-bool MarkCompactCollector::IsUnmarkedHeapObjectWithHeap(Heap* heap, |
- Object** p) { |
- Object* o = *p; |
- DCHECK(o->IsHeapObject()); |
- HeapObject* heap_object = HeapObject::cast(o); |
- MarkBit mark = Marking::MarkBitFrom(heap_object); |
- return Marking::IsWhite(mark); |
-} |
- |
- |
-void MarkCompactCollector::MarkStringTable(RootMarkingVisitor* visitor) { |
- StringTable* string_table = heap()->string_table(); |
- // Mark the string table itself. |
- MarkBit string_table_mark = Marking::MarkBitFrom(string_table); |
- if (Marking::IsWhite(string_table_mark)) { |
-    // The string table could already have been marked by visiting the handles list. |
- SetMark(string_table, string_table_mark); |
- } |
- // Explicitly mark the prefix. |
- string_table->IteratePrefix(visitor); |
- ProcessMarkingDeque(); |
-} |
- |
- |
-void MarkCompactCollector::MarkAllocationSite(AllocationSite* site) { |
- MarkBit mark_bit = Marking::MarkBitFrom(site); |
- SetMark(site, mark_bit); |
-} |
- |
- |
-void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) { |
- // Mark the heap roots including global variables, stack variables, |
- // etc., and all objects reachable from them. |
- heap()->IterateStrongRoots(visitor, VISIT_ONLY_STRONG); |
- |
- // Handle the string table specially. |
- MarkStringTable(visitor); |
- |
- // There may be overflowed objects in the heap. Visit them now. |
- while (marking_deque_.overflowed()) { |
- RefillMarkingDeque(); |
- EmptyMarkingDeque(); |
- } |
-} |
- |
- |
-void MarkCompactCollector::MarkImplicitRefGroups( |
- MarkObjectFunction mark_object) { |
- List<ImplicitRefGroup*>* ref_groups = |
- isolate()->global_handles()->implicit_ref_groups(); |
- |
- int last = 0; |
- for (int i = 0; i < ref_groups->length(); i++) { |
- ImplicitRefGroup* entry = ref_groups->at(i); |
- DCHECK(entry != NULL); |
- |
- if (!IsMarked(*entry->parent)) { |
- (*ref_groups)[last++] = entry; |
- continue; |
- } |
- |
- Object*** children = entry->children; |
- // A parent object is marked, so mark all child heap objects. |
- for (size_t j = 0; j < entry->length; ++j) { |
- if ((*children[j])->IsHeapObject()) { |
- mark_object(heap(), HeapObject::cast(*children[j])); |
- } |
- } |
- |
- // Once the entire group has been marked, dispose it because it's |
- // not needed anymore. |
- delete entry; |
- } |
- ref_groups->Rewind(last); |
-} |
- |
- |
-// Mark all objects reachable from the objects on the marking stack. |
-// Before: the marking stack contains zero or more heap object pointers. |
-// After: the marking stack is empty, and all objects reachable from the |
-// marking stack have been marked, or are overflowed in the heap. |
-void MarkCompactCollector::EmptyMarkingDeque() { |
- Map* filler_map = heap_->one_pointer_filler_map(); |
- while (!marking_deque_.IsEmpty()) { |
- HeapObject* object = marking_deque_.Pop(); |
- // Explicitly skip one word fillers. Incremental markbit patterns are |
- // correct only for objects that occupy at least two words. |
- Map* map = object->map(); |
- if (map == filler_map) continue; |
- |
- DCHECK(object->IsHeapObject()); |
- DCHECK(heap()->Contains(object)); |
- DCHECK(!Marking::IsWhite(Marking::MarkBitFrom(object))); |
- |
- MarkBit map_mark = Marking::MarkBitFrom(map); |
- MarkObject(map, map_mark); |
- |
- MarkCompactMarkingVisitor::IterateBody(map, object); |
- } |
-} |
- |
- |
-// Sweep the heap for overflowed objects, clear their overflow bits, and |
-// push them on the marking stack. Stop early if the marking stack fills |
-// before sweeping completes. If sweeping completes, there are no remaining |
-// overflowed objects in the heap, so the overflow flag on the marking stack |
-// is cleared. |
-void MarkCompactCollector::RefillMarkingDeque() { |
- isolate()->CountUsage(v8::Isolate::UseCounterFeature::kMarkDequeOverflow); |
- DCHECK(marking_deque_.overflowed()); |
- |
- DiscoverGreyObjectsInNewSpace(); |
- if (marking_deque_.IsFull()) return; |
- |
- DiscoverGreyObjectsInSpace(heap()->old_space()); |
- if (marking_deque_.IsFull()) return; |
- |
- DiscoverGreyObjectsInSpace(heap()->code_space()); |
- if (marking_deque_.IsFull()) return; |
- |
- DiscoverGreyObjectsInSpace(heap()->map_space()); |
- if (marking_deque_.IsFull()) return; |
- |
- LargeObjectIterator lo_it(heap()->lo_space()); |
- DiscoverGreyObjectsWithIterator(&lo_it); |
- if (marking_deque_.IsFull()) return; |
- |
- marking_deque_.ClearOverflowed(); |
-} |
- |
- |
-// Mark all objects reachable (transitively) from objects on the marking |
-// stack. Before: the marking stack contains zero or more heap object |
-// pointers. After: the marking stack is empty and there are no overflowed |
-// objects in the heap. |
-void MarkCompactCollector::ProcessMarkingDeque() { |
- EmptyMarkingDeque(); |
- while (marking_deque_.overflowed()) { |
- RefillMarkingDeque(); |
- EmptyMarkingDeque(); |
- } |
-} |
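
EmptyMarkingDeque, RefillMarkingDeque, and ProcessMarkingDeque above together implement a bounded worklist with an overflow protocol: when the deque is full, newly discovered objects are simply left grey in the heap and a flag is set; a rescan rediscovers them, and draining repeats until a rescan completes without refilling the deque. A toy model of that fixpoint, with all types hypothetical:

#include <vector>

struct Obj {
  bool marked = false;     // "grey or black"
  bool processed = false;  // "black": body already scanned
  std::vector<Obj*> refs;
};

struct Marker {
  std::vector<Obj*>* heap;  // all objects, for rescanning
  std::vector<Obj*> deque;
  size_t capacity = 4;      // deliberately tiny to force overflow
  bool overflowed = false;

  void Push(Obj* o) {
    if (deque.size() >= capacity) {
      overflowed = true;  // leave o grey; a rescan will find it again
      return;
    }
    deque.push_back(o);
  }
  void Drain() {  // ~ EmptyMarkingDeque
    while (!deque.empty()) {
      Obj* o = deque.back();
      deque.pop_back();
      o->processed = true;
      for (Obj* r : o->refs)
        if (!r->marked) {
          r->marked = true;
          Push(r);
        }
    }
  }
  void Refill() {  // ~ RefillMarkingDeque: rescan for grey objects
    overflowed = false;  // re-set below if the deque fills up again
    for (Obj* o : *heap) {
      if (o->marked && !o->processed) {
        Push(o);
        if (overflowed) return;  // deque full again; retry after draining
      }
    }
  }
  void Process() {  // ~ ProcessMarkingDeque: drain to a fixpoint
    Drain();
    while (overflowed) {
      Refill();
      Drain();
    }
  }
};
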
- |
- |
-// Mark all objects reachable (transitively) from objects on the marking |
-// stack including references only considered in the atomic marking pause. |
-void MarkCompactCollector::ProcessEphemeralMarking( |
- ObjectVisitor* visitor, bool only_process_harmony_weak_collections) { |
- bool work_to_do = true; |
- DCHECK(marking_deque_.IsEmpty() && !marking_deque_.overflowed()); |
- while (work_to_do) { |
- if (!only_process_harmony_weak_collections) { |
- isolate()->global_handles()->IterateObjectGroups( |
- visitor, &IsUnmarkedHeapObjectWithHeap); |
- MarkImplicitRefGroups(&MarkCompactMarkingVisitor::MarkObject); |
- } |
- ProcessWeakCollections(); |
- work_to_do = !marking_deque_.IsEmpty(); |
- ProcessMarkingDeque(); |
- } |
-} |
- |
- |
-void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) { |
- for (StackFrameIterator it(isolate(), isolate()->thread_local_top()); |
- !it.done(); it.Advance()) { |
- if (it.frame()->type() == StackFrame::JAVA_SCRIPT) { |
- return; |
- } |
- if (it.frame()->type() == StackFrame::OPTIMIZED) { |
- Code* code = it.frame()->LookupCode(); |
- if (!code->CanDeoptAt(it.frame()->pc())) { |
- code->CodeIterateBody(visitor); |
- } |
- ProcessMarkingDeque(); |
- return; |
- } |
- } |
-} |
- |
- |
-void MarkCompactCollector::RetainMaps() { |
- if (heap()->ShouldReduceMemory() || heap()->ShouldAbortIncrementalMarking() || |
- FLAG_retain_maps_for_n_gc == 0) { |
-    // Do not retain dead maps if the flag disables it, or if there is |
-    // - memory pressure (reduce_memory_footprint_), or |
-    // - a GC requested by tests or dev-tools (abort_incremental_marking_). |
- return; |
- } |
- |
- ArrayList* retained_maps = heap()->retained_maps(); |
- int length = retained_maps->Length(); |
- int new_length = 0; |
- for (int i = 0; i < length; i += 2) { |
- DCHECK(retained_maps->Get(i)->IsWeakCell()); |
- WeakCell* cell = WeakCell::cast(retained_maps->Get(i)); |
- if (cell->cleared()) continue; |
- int age = Smi::cast(retained_maps->Get(i + 1))->value(); |
- int new_age; |
- Map* map = Map::cast(cell->value()); |
- MarkBit map_mark = Marking::MarkBitFrom(map); |
- if (Marking::IsWhite(map_mark)) { |
- if (age == 0) { |
- // The map has aged. Do not retain this map. |
- continue; |
- } |
- Object* constructor = map->GetConstructor(); |
- if (!constructor->IsHeapObject() || Marking::IsWhite(Marking::MarkBitFrom( |
- HeapObject::cast(constructor)))) { |
- // The constructor is dead, no new objects with this map can |
- // be created. Do not retain this map. |
- continue; |
- } |
- Object* prototype = map->prototype(); |
- if (prototype->IsHeapObject() && |
- Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(prototype)))) { |
- // The prototype is not marked, age the map. |
- new_age = age - 1; |
- } else { |
-      // The prototype and the constructor are marked; this map keeps only |
-      // the transition tree alive, not JSObjects. Do not age the map. |
- new_age = age; |
- } |
- MarkObject(map, map_mark); |
- } else { |
- new_age = FLAG_retain_maps_for_n_gc; |
- } |
- if (i != new_length) { |
- retained_maps->Set(new_length, cell); |
- Object** slot = retained_maps->Slot(new_length); |
- RecordSlot(retained_maps, slot, cell); |
- retained_maps->Set(new_length + 1, Smi::FromInt(new_age)); |
- } else if (new_age != age) { |
- retained_maps->Set(new_length + 1, Smi::FromInt(new_age)); |
- } |
- new_length += 2; |
- } |
- Object* undefined = heap()->undefined_value(); |
- for (int i = new_length; i < length; i++) { |
- retained_maps->Clear(i, undefined); |
- } |
- if (new_length != length) retained_maps->SetLength(new_length); |
- ProcessMarkingDeque(); |
-} |
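
Condensed, the retention policy above is a small decision function over four facts about the map. The following standalone sketch restates it; Fate and kRetainMapsForNGc are hypothetical stand-ins, the latter for FLAG_retain_maps_for_n_gc:

const int kRetainMapsForNGc = 2;

enum Fate { DROP, RETAIN_AGED, RETAIN_SAME_AGE, RESET_AGE };

Fate DecideMapFate(bool map_marked, bool constructor_alive,
                   bool prototype_marked, int age) {
  if (map_marked) return RESET_AGE;        // new age: kRetainMapsForNGc
  if (age == 0) return DROP;               // the map has aged out
  if (!constructor_alive) return DROP;     // no new instances are possible
  if (!prototype_marked) return RETAIN_AGED;  // new age: age - 1
  return RETAIN_SAME_AGE;  // only the transition tree is kept alive
}
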
- |
- |
-void MarkCompactCollector::EnsureMarkingDequeIsReserved() { |
- DCHECK(!marking_deque_.in_use()); |
- if (marking_deque_memory_ == NULL) { |
- marking_deque_memory_ = new base::VirtualMemory(kMaxMarkingDequeSize); |
- marking_deque_memory_committed_ = 0; |
- } |
- if (marking_deque_memory_ == NULL) { |
- V8::FatalProcessOutOfMemory("EnsureMarkingDequeIsReserved"); |
- } |
-} |
- |
- |
-void MarkCompactCollector::EnsureMarkingDequeIsCommitted(size_t max_size) { |
- // If the marking deque is too small, we try to allocate a bigger one. |
- // If that fails, make do with a smaller one. |
- CHECK(!marking_deque_.in_use()); |
- for (size_t size = max_size; size >= kMinMarkingDequeSize; size >>= 1) { |
- base::VirtualMemory* memory = marking_deque_memory_; |
- size_t currently_committed = marking_deque_memory_committed_; |
- |
- if (currently_committed == size) return; |
- |
- if (currently_committed > size) { |
- bool success = marking_deque_memory_->Uncommit( |
- reinterpret_cast<Address>(marking_deque_memory_->address()) + size, |
- currently_committed - size); |
- if (success) { |
- marking_deque_memory_committed_ = size; |
- return; |
- } |
- UNREACHABLE(); |
- } |
- |
- bool success = memory->Commit( |
- reinterpret_cast<Address>(memory->address()) + currently_committed, |
- size - currently_committed, |
- false); // Not executable. |
- if (success) { |
- marking_deque_memory_committed_ = size; |
- return; |
- } |
- } |
- V8::FatalProcessOutOfMemory("EnsureMarkingDequeIsCommitted"); |
-} |
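
The commit loop above is a simple halving backoff: try the requested size, and on failure retry with half until the minimum is reached. A sketch of the pattern with a stubbed-out commit call; TryCommit is hypothetical, and min_size is assumed to be at least 1:

#include <cstddef>

// Stub: pretend the OS can commit at most 1 MB in one go.
static bool TryCommit(size_t size) { return size <= (1u << 20); }

bool CommitWithBackoff(size_t max_size, size_t min_size, size_t* committed) {
  for (size_t size = max_size; size >= min_size; size >>= 1) {
    if (TryCommit(size)) {
      *committed = size;
      return true;
    }
  }
  return false;  // the caller treats this as out-of-memory
}
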
- |
- |
-void MarkCompactCollector::InitializeMarkingDeque() { |
- DCHECK(!marking_deque_.in_use()); |
- DCHECK(marking_deque_memory_committed_ > 0); |
- Address addr = static_cast<Address>(marking_deque_memory_->address()); |
- size_t size = marking_deque_memory_committed_; |
- if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize; |
- marking_deque_.Initialize(addr, addr + size); |
-} |
- |
- |
-void MarkingDeque::Initialize(Address low, Address high) { |
- DCHECK(!in_use_); |
- HeapObject** obj_low = reinterpret_cast<HeapObject**>(low); |
- HeapObject** obj_high = reinterpret_cast<HeapObject**>(high); |
- array_ = obj_low; |
- mask_ = base::bits::RoundDownToPowerOfTwo32( |
- static_cast<uint32_t>(obj_high - obj_low)) - |
- 1; |
- top_ = bottom_ = 0; |
- overflowed_ = false; |
- in_use_ = true; |
-} |
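
Initialize above rounds the capacity down to a power of two so that mask_ turns index wrap-around into a single AND. A toy ring buffer using the same trick; base::bits::RoundDownToPowerOfTwo32 is re-sketched here and assumes a capacity of at least 1:

#include <cassert>
#include <cstdint>

static uint32_t RoundDownToPowerOfTwo32(uint32_t x) {
  uint32_t p = 1;
  while (p <= x / 2) p <<= 1;
  return p;  // e.g. 100 -> 64
}

struct RingStack {
  static const uint32_t kSlots = 100;  // arbitrary raw capacity
  int slots[kSlots];
  uint32_t mask = RoundDownToPowerOfTwo32(kSlots) - 1;  // 64 - 1 = 63
  uint32_t top = 0, bottom = 0;

  bool IsEmpty() const { return top == bottom; }
  bool IsFull() const { return ((top + 1) & mask) == bottom; }

  void Push(int value) {
    assert(!IsFull());
    slots[top] = value;
    top = (top + 1) & mask;  // wrap without a modulo
  }
  int Pop() {
    assert(!IsEmpty());
    top = (top - 1) & mask;  // unsigned wrap-around is well defined
    return slots[top];
  }
};
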
- |
- |
-void MarkingDeque::Uninitialize(bool aborting) { |
- if (!aborting) { |
- DCHECK(IsEmpty()); |
- DCHECK(!overflowed_); |
- } |
- DCHECK(in_use_); |
- top_ = bottom_ = 0xdecbad; |
- in_use_ = false; |
-} |
- |
- |
-void MarkCompactCollector::MarkLiveObjects() { |
- GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_MARK); |
- double start_time = 0.0; |
- if (FLAG_print_cumulative_gc_stat) { |
- start_time = base::OS::TimeCurrentMillis(); |
- } |
- // The recursive GC marker detects when it is nearing stack overflow, |
- // and switches to a different marking system. JS interrupts interfere |
- // with the C stack limit check. |
- PostponeInterruptsScope postpone(isolate()); |
- |
- { |
- GCTracer::Scope gc_scope(heap()->tracer(), |
- GCTracer::Scope::MC_MARK_FINISH_INCREMENTAL); |
- IncrementalMarking* incremental_marking = heap_->incremental_marking(); |
- if (was_marked_incrementally_) { |
- incremental_marking->Finalize(); |
- } else { |
- // Abort any pending incremental activities e.g. incremental sweeping. |
- incremental_marking->Stop(); |
- if (marking_deque_.in_use()) { |
- marking_deque_.Uninitialize(true); |
- } |
- } |
- } |
- |
-#ifdef DEBUG |
- DCHECK(state_ == PREPARE_GC); |
- state_ = MARK_LIVE_OBJECTS; |
-#endif |
- |
- EnsureMarkingDequeIsCommittedAndInitialize( |
- MarkCompactCollector::kMaxMarkingDequeSize); |
- |
- { |
- GCTracer::Scope gc_scope(heap()->tracer(), |
- GCTracer::Scope::MC_MARK_PREPARE_CODE_FLUSH); |
- PrepareForCodeFlushing(); |
- } |
- |
- RootMarkingVisitor root_visitor(heap()); |
- |
- { |
- GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_MARK_ROOT); |
- MarkRoots(&root_visitor); |
- } |
- |
- { |
- GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_MARK_TOPOPT); |
- ProcessTopOptimizedFrame(&root_visitor); |
- } |
- |
- // Retaining dying maps should happen before or during ephemeral marking |
- // because a map could keep the key of an ephemeron alive. Note that map |
- // aging is imprecise: maps that are kept alive only by ephemerons will age. |
- { |
- GCTracer::Scope gc_scope(heap()->tracer(), |
- GCTracer::Scope::MC_MARK_RETAIN_MAPS); |
- RetainMaps(); |
- } |
- |
- { |
- GCTracer::Scope gc_scope(heap()->tracer(), |
- GCTracer::Scope::MC_MARK_WEAK_CLOSURE); |
- |
-    // At this point, the objects reachable from the roots are marked, while |
-    // unreachable objects remain unmarked. Mark objects reachable due to |
-    // host-application-specific logic or through Harmony weak maps. |
- ProcessEphemeralMarking(&root_visitor, false); |
- |
- // The objects reachable from the roots, weak maps or object groups |
- // are marked. Objects pointed to only by weak global handles cannot be |
- // immediately reclaimed. Instead, we have to mark them as pending and mark |
- // objects reachable from them. |
- // |
- // First we identify nonlive weak handles and mark them as pending |
- // destruction. |
- heap()->isolate()->global_handles()->IdentifyWeakHandles( |
- &IsUnmarkedHeapObject); |
- // Then we mark the objects. |
- heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor); |
- ProcessMarkingDeque(); |
- |
- // Repeat Harmony weak maps marking to mark unmarked objects reachable from |
- // the weak roots we just marked as pending destruction. |
- // |
-    // We only process harmony collections, as all object groups have been fully |
-    // processed and no weakly reachable node can discover new object groups. |
- ProcessEphemeralMarking(&root_visitor, true); |
- } |
- |
- AfterMarking(); |
- |
- if (FLAG_print_cumulative_gc_stat) { |
- heap_->tracer()->AddMarkingTime(base::OS::TimeCurrentMillis() - start_time); |
- } |
-} |
- |
- |
-void MarkCompactCollector::AfterMarking() { |
- { |
- GCTracer::Scope gc_scope(heap()->tracer(), |
- GCTracer::Scope::MC_MARK_STRING_TABLE); |
- |
- // Prune the string table removing all strings only pointed to by the |
- // string table. Cannot use string_table() here because the string |
- // table is marked. |
- StringTable* string_table = heap()->string_table(); |
- InternalizedStringTableCleaner internalized_visitor(heap()); |
- string_table->IterateElements(&internalized_visitor); |
- string_table->ElementsRemoved(internalized_visitor.PointersRemoved()); |
- |
- ExternalStringTableCleaner external_visitor(heap()); |
- heap()->external_string_table_.Iterate(&external_visitor); |
- heap()->external_string_table_.CleanUp(); |
- } |
- |
- { |
- GCTracer::Scope gc_scope(heap()->tracer(), |
- GCTracer::Scope::MC_MARK_WEAK_REFERENCES); |
- |
- // Process the weak references. |
- MarkCompactWeakObjectRetainer mark_compact_object_retainer; |
- heap()->ProcessAllWeakReferences(&mark_compact_object_retainer); |
- } |
- |
- { |
- GCTracer::Scope gc_scope(heap()->tracer(), |
- GCTracer::Scope::MC_MARK_GLOBAL_HANDLES); |
- |
- // Remove object groups after marking phase. |
- heap()->isolate()->global_handles()->RemoveObjectGroups(); |
- heap()->isolate()->global_handles()->RemoveImplicitRefGroups(); |
- } |
- |
- // Flush code from collected candidates. |
- if (is_code_flushing_enabled()) { |
- GCTracer::Scope gc_scope(heap()->tracer(), |
- GCTracer::Scope::MC_MARK_CODE_FLUSH); |
- code_flusher_->ProcessCandidates(); |
- } |
- |
- // Process and clear all optimized code maps. |
- if (!FLAG_flush_optimized_code_cache) { |
- GCTracer::Scope gc_scope(heap()->tracer(), |
- GCTracer::Scope::MC_MARK_OPTIMIZED_CODE_MAPS); |
- ProcessAndClearOptimizedCodeMaps(); |
- } |
- |
- if (FLAG_track_gc_object_stats) { |
- if (FLAG_trace_gc_object_stats) { |
- heap()->object_stats_->TraceObjectStats(); |
- } |
- heap()->object_stats_->CheckpointObjectStats(); |
- } |
-} |
- |
- |
-void MarkCompactCollector::ProcessAndClearOptimizedCodeMaps() { |
- SharedFunctionInfo::Iterator iterator(isolate()); |
- while (SharedFunctionInfo* shared = iterator.Next()) { |
- if (shared->optimized_code_map()->IsSmi()) continue; |
+void CodeFlusher::ProcessOptimizedCodeMaps() { |
+ STATIC_ASSERT(SharedFunctionInfo::kEntryLength == 4); |
+ |
+ SharedFunctionInfo* holder = optimized_code_map_holder_head_; |
+ SharedFunctionInfo* next_holder; |
+ |
+ while (holder != NULL) { |
+ next_holder = GetNextCodeMap(holder); |
+ ClearNextCodeMap(holder); |
// Process context-dependent entries in the optimized code map. |
- FixedArray* code_map = FixedArray::cast(shared->optimized_code_map()); |
+ FixedArray* code_map = FixedArray::cast(holder->optimized_code_map()); |
int new_length = SharedFunctionInfo::kEntriesStart; |
int old_length = code_map->length(); |
for (int i = SharedFunctionInfo::kEntriesStart; i < old_length; |
@@ -2155,13 +1023,16 @@ |
code_map->set(new_length + SharedFunctionInfo::kOsrAstIdOffset, ast_id); |
Object** code_slot = code_map->RawFieldOfElementAt( |
new_length + SharedFunctionInfo::kCachedCodeOffset); |
- RecordSlot(code_map, code_slot, *code_slot); |
+ isolate_->heap()->mark_compact_collector()->RecordSlot( |
+ code_map, code_slot, *code_slot); |
Object** context_slot = code_map->RawFieldOfElementAt( |
new_length + SharedFunctionInfo::kContextOffset); |
- RecordSlot(code_map, context_slot, *context_slot); |
+ isolate_->heap()->mark_compact_collector()->RecordSlot( |
+ code_map, context_slot, *context_slot); |
Object** literals_slot = code_map->RawFieldOfElementAt( |
new_length + SharedFunctionInfo::kLiteralsOffset); |
- RecordSlot(code_map, literals_slot, *literals_slot); |
+ isolate_->heap()->mark_compact_collector()->RecordSlot( |
+ code_map, literals_slot, *literals_slot); |
new_length += SharedFunctionInfo::kEntryLength; |
} |
@@ -2175,14 +1046,1199 @@ |
DCHECK(Marking::IsBlack(Marking::MarkBitFrom(shared_code))); |
Object** slot = |
code_map->RawFieldOfElementAt(SharedFunctionInfo::kSharedCodeIndex); |
- RecordSlot(code_map, slot, *slot); |
+ isolate_->heap()->mark_compact_collector()->RecordSlot(code_map, slot, |
+ *slot); |
} |
} |
// Trim the optimized code map if entries have been removed. |
if (new_length < old_length) { |
- shared->TrimOptimizedCodeMap(old_length - new_length); |
- } |
+ holder->TrimOptimizedCodeMap(old_length - new_length); |
+ } |
+ |
+ holder = next_holder; |
+ } |
+ |
+ optimized_code_map_holder_head_ = NULL; |
+} |
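
ProcessOptimizedCodeMaps above compacts each code map in place: entries are fixed-width groups of four slots (context, cached code, literals, OSR ast id, per the STATIC_ASSERT), live entries slide down over dead ones, and the array is trimmed by whatever was removed. A standalone sketch of that compaction over plain ints; the header size and liveness test are hypothetical:

#include <vector>

const int kEntriesStart = 2;  // hypothetical header slots before the entries
const int kEntryLength = 4;   // context, cached code, literals, OSR ast id

int CompactEntries(std::vector<int>* code_map, bool (*is_live)(int context)) {
  int new_length = kEntriesStart;
  int old_length = static_cast<int>(code_map->size());
  for (int i = kEntriesStart; i < old_length; i += kEntryLength) {
    if (!is_live((*code_map)[i])) continue;  // skip (drop) a dead entry
    if (new_length != i) {
      for (int f = 0; f < kEntryLength; f++)  // slide the live entry down
        (*code_map)[new_length + f] = (*code_map)[i + f];
    }
    new_length += kEntryLength;
  }
  code_map->resize(new_length);    // analogous to TrimOptimizedCodeMap
  return old_length - new_length;  // number of slots removed
}
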
+ |
+ |
+void CodeFlusher::EvictCandidate(SharedFunctionInfo* shared_info) { |
+ // Make sure previous flushing decisions are revisited. |
+ isolate_->heap()->incremental_marking()->RecordWrites(shared_info); |
+ |
+ if (FLAG_trace_code_flushing) { |
+ PrintF("[code-flushing abandons function-info: "); |
+ shared_info->ShortPrint(); |
+ PrintF("]\n"); |
+ } |
+ |
+ SharedFunctionInfo* candidate = shared_function_info_candidates_head_; |
+ SharedFunctionInfo* next_candidate; |
+ if (candidate == shared_info) { |
+ next_candidate = GetNextCandidate(shared_info); |
+ shared_function_info_candidates_head_ = next_candidate; |
+ ClearNextCandidate(shared_info); |
+ } else { |
+ while (candidate != NULL) { |
+ next_candidate = GetNextCandidate(candidate); |
+ |
+ if (next_candidate == shared_info) { |
+ next_candidate = GetNextCandidate(shared_info); |
+ SetNextCandidate(candidate, next_candidate); |
+ ClearNextCandidate(shared_info); |
+ break; |
+ } |
+ |
+ candidate = next_candidate; |
+ } |
+ } |
+} |
+ |
+ |
+void CodeFlusher::EvictCandidate(JSFunction* function) { |
+ DCHECK(!function->next_function_link()->IsUndefined()); |
+ Object* undefined = isolate_->heap()->undefined_value(); |
+ |
+ // Make sure previous flushing decisions are revisited. |
+ isolate_->heap()->incremental_marking()->RecordWrites(function); |
+ isolate_->heap()->incremental_marking()->RecordWrites(function->shared()); |
+ |
+ if (FLAG_trace_code_flushing) { |
+ PrintF("[code-flushing abandons closure: "); |
+ function->shared()->ShortPrint(); |
+ PrintF("]\n"); |
+ } |
+ |
+ JSFunction* candidate = jsfunction_candidates_head_; |
+ JSFunction* next_candidate; |
+ if (candidate == function) { |
+ next_candidate = GetNextCandidate(function); |
+ jsfunction_candidates_head_ = next_candidate; |
+ ClearNextCandidate(function, undefined); |
+ } else { |
+ while (candidate != NULL) { |
+ next_candidate = GetNextCandidate(candidate); |
+ |
+ if (next_candidate == function) { |
+ next_candidate = GetNextCandidate(function); |
+ SetNextCandidate(candidate, next_candidate); |
+ ClearNextCandidate(function, undefined); |
+ break; |
+ } |
+ |
+ candidate = next_candidate; |
+ } |
+ } |
+} |
+ |
+ |
+void CodeFlusher::EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder) { |
+ FixedArray* code_map = |
+ FixedArray::cast(code_map_holder->optimized_code_map()); |
+ DCHECK(!code_map->get(SharedFunctionInfo::kNextMapIndex)->IsUndefined()); |
+ |
+ // Make sure previous flushing decisions are revisited. |
+ isolate_->heap()->incremental_marking()->RecordWrites(code_map); |
+ isolate_->heap()->incremental_marking()->RecordWrites(code_map_holder); |
+ |
+ if (FLAG_trace_code_flushing) { |
+ PrintF("[code-flushing abandons code-map: "); |
+ code_map_holder->ShortPrint(); |
+ PrintF("]\n"); |
+ } |
+ |
+ SharedFunctionInfo* holder = optimized_code_map_holder_head_; |
+ SharedFunctionInfo* next_holder; |
+ if (holder == code_map_holder) { |
+ next_holder = GetNextCodeMap(code_map_holder); |
+ optimized_code_map_holder_head_ = next_holder; |
+ ClearNextCodeMap(code_map_holder); |
+ } else { |
+ while (holder != NULL) { |
+ next_holder = GetNextCodeMap(holder); |
+ |
+ if (next_holder == code_map_holder) { |
+ next_holder = GetNextCodeMap(code_map_holder); |
+ SetNextCodeMap(holder, next_holder); |
+ ClearNextCodeMap(code_map_holder); |
+ break; |
+ } |
+ |
+ holder = next_holder; |
+ } |
+ } |
+} |
+ |
+ |
+void CodeFlusher::EvictJSFunctionCandidates() { |
+ JSFunction* candidate = jsfunction_candidates_head_; |
+ JSFunction* next_candidate; |
+ while (candidate != NULL) { |
+ next_candidate = GetNextCandidate(candidate); |
+ EvictCandidate(candidate); |
+ candidate = next_candidate; |
+ } |
+ DCHECK(jsfunction_candidates_head_ == NULL); |
+} |
+ |
+ |
+void CodeFlusher::EvictSharedFunctionInfoCandidates() { |
+ SharedFunctionInfo* candidate = shared_function_info_candidates_head_; |
+ SharedFunctionInfo* next_candidate; |
+ while (candidate != NULL) { |
+ next_candidate = GetNextCandidate(candidate); |
+ EvictCandidate(candidate); |
+ candidate = next_candidate; |
+ } |
+ DCHECK(shared_function_info_candidates_head_ == NULL); |
+} |
+ |
+ |
+void CodeFlusher::EvictOptimizedCodeMaps() { |
+ SharedFunctionInfo* holder = optimized_code_map_holder_head_; |
+ SharedFunctionInfo* next_holder; |
+ while (holder != NULL) { |
+ next_holder = GetNextCodeMap(holder); |
+ EvictOptimizedCodeMap(holder); |
+ holder = next_holder; |
+ } |
+ DCHECK(optimized_code_map_holder_head_ == NULL); |
+} |
+ |
+ |
+void CodeFlusher::IteratePointersToFromSpace(ObjectVisitor* v) { |
+ Heap* heap = isolate_->heap(); |
+ |
+ JSFunction** slot = &jsfunction_candidates_head_; |
+ JSFunction* candidate = jsfunction_candidates_head_; |
+ while (candidate != NULL) { |
+ if (heap->InFromSpace(candidate)) { |
+ v->VisitPointer(reinterpret_cast<Object**>(slot)); |
+ } |
+ candidate = GetNextCandidate(*slot); |
+ slot = GetNextCandidateSlot(*slot); |
+ } |
+} |
+ |
+ |
+MarkCompactCollector::~MarkCompactCollector() { |
+ if (code_flusher_ != NULL) { |
+ delete code_flusher_; |
+ code_flusher_ = NULL; |
+ } |
+} |
+ |
+ |
+class MarkCompactMarkingVisitor |
+ : public StaticMarkingVisitor<MarkCompactMarkingVisitor> { |
+ public: |
+ static void Initialize(); |
+ |
+ INLINE(static void VisitPointer(Heap* heap, HeapObject* object, Object** p)) { |
+ MarkObjectByPointer(heap->mark_compact_collector(), object, p); |
+ } |
+ |
+ INLINE(static void VisitPointers(Heap* heap, HeapObject* object, |
+ Object** start, Object** end)) { |
+ // Mark all objects pointed to in [start, end). |
+ const int kMinRangeForMarkingRecursion = 64; |
+ if (end - start >= kMinRangeForMarkingRecursion) { |
+ if (VisitUnmarkedObjects(heap, object, start, end)) return; |
+ // We are close to a stack overflow, so just mark the objects. |
+ } |
+ MarkCompactCollector* collector = heap->mark_compact_collector(); |
+ for (Object** p = start; p < end; p++) { |
+ MarkObjectByPointer(collector, object, p); |
+ } |
+ } |
+ |
+ // Marks the object black and pushes it on the marking stack. |
+ INLINE(static void MarkObject(Heap* heap, HeapObject* object)) { |
+ MarkBit mark = Marking::MarkBitFrom(object); |
+ heap->mark_compact_collector()->MarkObject(object, mark); |
+ } |
+ |
+ // Marks the object black without pushing it on the marking stack. |
+ // Returns true if object needed marking and false otherwise. |
+ INLINE(static bool MarkObjectWithoutPush(Heap* heap, HeapObject* object)) { |
+ MarkBit mark_bit = Marking::MarkBitFrom(object); |
+ if (Marking::IsWhite(mark_bit)) { |
+ heap->mark_compact_collector()->SetMark(object, mark_bit); |
+ return true; |
+ } |
+ return false; |
+ } |
+ |
+ // Mark object pointed to by p. |
+ INLINE(static void MarkObjectByPointer(MarkCompactCollector* collector, |
+ HeapObject* object, Object** p)) { |
+ if (!(*p)->IsHeapObject()) return; |
+ HeapObject* target_object = HeapObject::cast(*p); |
+ collector->RecordSlot(object, p, target_object); |
+ MarkBit mark = Marking::MarkBitFrom(target_object); |
+ collector->MarkObject(target_object, mark); |
+ } |
+ |
+ |
+ // Visit an unmarked object. |
+ INLINE(static void VisitUnmarkedObject(MarkCompactCollector* collector, |
+ HeapObject* obj)) { |
+#ifdef DEBUG |
+ DCHECK(collector->heap()->Contains(obj)); |
+ DCHECK(!collector->heap()->mark_compact_collector()->IsMarked(obj)); |
+#endif |
+ Map* map = obj->map(); |
+ Heap* heap = obj->GetHeap(); |
+ MarkBit mark = Marking::MarkBitFrom(obj); |
+ heap->mark_compact_collector()->SetMark(obj, mark); |
+ // Mark the map pointer and the body. |
+ MarkBit map_mark = Marking::MarkBitFrom(map); |
+ heap->mark_compact_collector()->MarkObject(map, map_mark); |
+ IterateBody(map, obj); |
+ } |
+ |
+ // Visit all unmarked objects pointed to by [start, end). |
+ // Returns false if the operation fails (lack of stack space). |
+ INLINE(static bool VisitUnmarkedObjects(Heap* heap, HeapObject* object, |
+ Object** start, Object** end)) { |
+    // Return false if we are close to the stack limit. |
+ StackLimitCheck check(heap->isolate()); |
+ if (check.HasOverflowed()) return false; |
+ |
+ MarkCompactCollector* collector = heap->mark_compact_collector(); |
+ // Visit the unmarked objects. |
+ for (Object** p = start; p < end; p++) { |
+ Object* o = *p; |
+ if (!o->IsHeapObject()) continue; |
+ collector->RecordSlot(object, p, o); |
+ HeapObject* obj = HeapObject::cast(o); |
+ MarkBit mark = Marking::MarkBitFrom(obj); |
+ if (Marking::IsBlackOrGrey(mark)) continue; |
+ VisitUnmarkedObject(collector, obj); |
+ } |
+ return true; |
+ } |
+ |
+ private: |
+ template <int id> |
+ static inline void TrackObjectStatsAndVisit(Map* map, HeapObject* obj); |
+ |
+ // Code flushing support. |
+ |
+ static const int kRegExpCodeThreshold = 5; |
+ |
+ static void UpdateRegExpCodeAgeAndFlush(Heap* heap, JSRegExp* re, |
+ bool is_one_byte) { |
+ // Make sure that the fixed array is in fact initialized on the RegExp. |
+ // We could potentially trigger a GC when initializing the RegExp. |
+ if (HeapObject::cast(re->data())->map()->instance_type() != |
+ FIXED_ARRAY_TYPE) |
+ return; |
+ |
+ // Make sure this is a RegExp that actually contains code. |
+ if (re->TypeTag() != JSRegExp::IRREGEXP) return; |
+ |
+ Object* code = re->DataAt(JSRegExp::code_index(is_one_byte)); |
+ if (!code->IsSmi() && |
+ HeapObject::cast(code)->map()->instance_type() == CODE_TYPE) { |
+ // Save a copy that can be reinstated if we need the code again. |
+ re->SetDataAt(JSRegExp::saved_code_index(is_one_byte), code); |
+ |
+      // Saving a copy might create a pointer into a compaction candidate |
+      // that was not observed by the marker. This might happen if the |
+      // JSRegExp data was marked through the compilation cache before the |
+      // marker reached the JSRegExp object. |
+ FixedArray* data = FixedArray::cast(re->data()); |
+ Object** slot = |
+ data->data_start() + JSRegExp::saved_code_index(is_one_byte); |
+ heap->mark_compact_collector()->RecordSlot(data, slot, code); |
+ |
+ // Set a number in the 0-255 range to guarantee no smi overflow. |
+ re->SetDataAt(JSRegExp::code_index(is_one_byte), |
+ Smi::FromInt(heap->ms_count() & 0xff)); |
+ } else if (code->IsSmi()) { |
+ int value = Smi::cast(code)->value(); |
+ // The regexp has not been compiled yet or there was a compilation error. |
+ if (value == JSRegExp::kUninitializedValue || |
+ value == JSRegExp::kCompilationErrorValue) { |
+ return; |
+ } |
+ |
+ // Check if we should flush now. |
+ if (value == ((heap->ms_count() - kRegExpCodeThreshold) & 0xff)) { |
+ re->SetDataAt(JSRegExp::code_index(is_one_byte), |
+ Smi::FromInt(JSRegExp::kUninitializedValue)); |
+ re->SetDataAt(JSRegExp::saved_code_index(is_one_byte), |
+ Smi::FromInt(JSRegExp::kUninitializedValue)); |
+ } |
+ } |
+ } |
+ |
+ |
+  // Works by storing the current sweep generation (as a smi) in the |
+  // code-object slot of the RegExp's data array, while keeping a copy |
+  // around that can be reinstated if the RegExp is reused before flushing. |
+  // If the code has not been used for kRegExpCodeThreshold mark-sweep GCs, |
+  // it is flushed. |
+ static void VisitRegExpAndFlushCode(Map* map, HeapObject* object) { |
+ Heap* heap = map->GetHeap(); |
+ MarkCompactCollector* collector = heap->mark_compact_collector(); |
+ if (!collector->is_code_flushing_enabled()) { |
+ VisitJSRegExp(map, object); |
+ return; |
+ } |
+ JSRegExp* re = reinterpret_cast<JSRegExp*>(object); |
+ // Flush code or set age on both one byte and two byte code. |
+ UpdateRegExpCodeAgeAndFlush(heap, re, true); |
+ UpdateRegExpCodeAgeAndFlush(heap, re, false); |
+ // Visit the fields of the RegExp, including the updated FixedArray. |
+ VisitJSRegExp(map, object); |
+ } |
+}; |
+ |
+ |
+void MarkCompactMarkingVisitor::Initialize() { |
+ StaticMarkingVisitor<MarkCompactMarkingVisitor>::Initialize(); |
+ |
+ table_.Register(kVisitJSRegExp, &VisitRegExpAndFlushCode); |
+ |
+ if (FLAG_track_gc_object_stats) { |
+ ObjectStatsVisitor::Initialize(&table_); |
+ } |
+} |
+ |
+ |
+class CodeMarkingVisitor : public ThreadVisitor { |
+ public: |
+ explicit CodeMarkingVisitor(MarkCompactCollector* collector) |
+ : collector_(collector) {} |
+ |
+ void VisitThread(Isolate* isolate, ThreadLocalTop* top) { |
+ collector_->PrepareThreadForCodeFlushing(isolate, top); |
+ } |
+ |
+ private: |
+ MarkCompactCollector* collector_; |
+}; |
+ |
+ |
+class SharedFunctionInfoMarkingVisitor : public ObjectVisitor { |
+ public: |
+ explicit SharedFunctionInfoMarkingVisitor(MarkCompactCollector* collector) |
+ : collector_(collector) {} |
+ |
+ void VisitPointers(Object** start, Object** end) override { |
+ for (Object** p = start; p < end; p++) VisitPointer(p); |
+ } |
+ |
+ void VisitPointer(Object** slot) override { |
+ Object* obj = *slot; |
+ if (obj->IsSharedFunctionInfo()) { |
+ SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj); |
+ MarkBit shared_mark = Marking::MarkBitFrom(shared); |
+ MarkBit code_mark = Marking::MarkBitFrom(shared->code()); |
+ collector_->MarkObject(shared->code(), code_mark); |
+ collector_->MarkObject(shared, shared_mark); |
+ } |
+ } |
+ |
+ private: |
+ MarkCompactCollector* collector_; |
+}; |
+ |
+ |
+void MarkCompactCollector::PrepareThreadForCodeFlushing(Isolate* isolate, |
+ ThreadLocalTop* top) { |
+ for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) { |
+    // Note: for a frame that has a pending lazy deoptimization, |
+    // StackFrame::unchecked_code will return a non-optimized code object for |
+    // the outermost function, while StackFrame::LookupCode will return the |
+    // actual optimized code object. |
+ StackFrame* frame = it.frame(); |
+ Code* code = frame->unchecked_code(); |
+ MarkBit code_mark = Marking::MarkBitFrom(code); |
+ MarkObject(code, code_mark); |
+ if (frame->is_optimized()) { |
+ MarkCompactMarkingVisitor::MarkInlinedFunctionsCode(heap(), |
+ frame->LookupCode()); |
+ } |
+ } |
+} |
+ |
+ |
+void MarkCompactCollector::PrepareForCodeFlushing() { |
+ // If code flushing is disabled, there is no need to prepare for it. |
+ if (!is_code_flushing_enabled()) return; |
+ |
+ // Ensure that empty descriptor array is marked. Method MarkDescriptorArray |
+ // relies on it being marked before any other descriptor array. |
+ HeapObject* descriptor_array = heap()->empty_descriptor_array(); |
+ MarkBit descriptor_array_mark = Marking::MarkBitFrom(descriptor_array); |
+ MarkObject(descriptor_array, descriptor_array_mark); |
+ |
+ // Make sure we are not referencing the code from the stack. |
+ DCHECK(this == heap()->mark_compact_collector()); |
+ PrepareThreadForCodeFlushing(heap()->isolate(), |
+ heap()->isolate()->thread_local_top()); |
+ |
+ // Iterate the archived stacks in all threads to check if |
+ // the code is referenced. |
+ CodeMarkingVisitor code_marking_visitor(this); |
+ heap()->isolate()->thread_manager()->IterateArchivedThreads( |
+ &code_marking_visitor); |
+ |
+ SharedFunctionInfoMarkingVisitor visitor(this); |
+ heap()->isolate()->compilation_cache()->IterateFunctions(&visitor); |
+ heap()->isolate()->handle_scope_implementer()->Iterate(&visitor); |
+ |
+ ProcessMarkingDeque(); |
+} |
+ |
+ |
+// Visitor class for marking heap roots. |
+class RootMarkingVisitor : public ObjectVisitor { |
+ public: |
+ explicit RootMarkingVisitor(Heap* heap) |
+ : collector_(heap->mark_compact_collector()) {} |
+ |
+ void VisitPointer(Object** p) override { MarkObjectByPointer(p); } |
+ |
+ void VisitPointers(Object** start, Object** end) override { |
+ for (Object** p = start; p < end; p++) MarkObjectByPointer(p); |
+ } |
+ |
+ // Skip the weak next code link in a code object, which is visited in |
+ // ProcessTopOptimizedFrame. |
+ void VisitNextCodeLink(Object** p) override {} |
+ |
+ private: |
+ void MarkObjectByPointer(Object** p) { |
+ if (!(*p)->IsHeapObject()) return; |
+ |
+ HeapObject* object = HeapObject::cast(*p); |
+ MarkBit mark_bit = Marking::MarkBitFrom(object); |
+ if (Marking::IsBlackOrGrey(mark_bit)) return; |
+ |
+ Map* map = object->map(); |
+ // Mark the object. |
+ collector_->SetMark(object, mark_bit); |
+ |
+ // Mark the map pointer and body, and push them on the marking stack. |
+ MarkBit map_mark = Marking::MarkBitFrom(map); |
+ collector_->MarkObject(map, map_mark); |
+ MarkCompactMarkingVisitor::IterateBody(map, object); |
+ |
+ // Mark all the objects reachable from the map and body. May leave |
+ // overflowed objects in the heap. |
+ collector_->EmptyMarkingDeque(); |
+ } |
+ |
+ MarkCompactCollector* collector_; |
+}; |
+ |
+ |
+// Helper class for pruning the string table. |
+template <bool finalize_external_strings> |
+class StringTableCleaner : public ObjectVisitor { |
+ public: |
+ explicit StringTableCleaner(Heap* heap) : heap_(heap), pointers_removed_(0) {} |
+ |
+ void VisitPointers(Object** start, Object** end) override { |
+ // Visit all HeapObject pointers in [start, end). |
+ for (Object** p = start; p < end; p++) { |
+ Object* o = *p; |
+ if (o->IsHeapObject() && |
+ Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(o)))) { |
+ if (finalize_external_strings) { |
+ DCHECK(o->IsExternalString()); |
+ heap_->FinalizeExternalString(String::cast(*p)); |
+ } else { |
+ pointers_removed_++; |
+ } |
+ // Set the entry to the_hole_value (as deleted). |
+ *p = heap_->the_hole_value(); |
+ } |
+ } |
+ } |
+ |
+ int PointersRemoved() { |
+ DCHECK(!finalize_external_strings); |
+ return pointers_removed_; |
+ } |
+ |
+ private: |
+ Heap* heap_; |
+ int pointers_removed_; |
+}; |
+ |
+ |
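+// The internalized variant counts the removed entries so that the table size
+// can be shrunk afterwards; the external variant instead finalizes dead
+// external strings.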
+typedef StringTableCleaner<false> InternalizedStringTableCleaner; |
+typedef StringTableCleaner<true> ExternalStringTableCleaner; |
+ |
+ |
+// Implementation of WeakObjectRetainer for mark-compact GCs. All marked
+// objects are retained.
+class MarkCompactWeakObjectRetainer : public WeakObjectRetainer { |
+ public: |
+ virtual Object* RetainAs(Object* object) { |
+ if (Marking::IsBlackOrGrey( |
+ Marking::MarkBitFrom(HeapObject::cast(object)))) { |
+ return object; |
+ } else if (object->IsAllocationSite() && |
+ !(AllocationSite::cast(object)->IsZombie())) { |
+ // "dead" AllocationSites need to live long enough for a traversal of new |
+ // space. These sites get a one-time reprieve. |
+ AllocationSite* site = AllocationSite::cast(object); |
+ site->MarkZombie(); |
+ site->GetHeap()->mark_compact_collector()->MarkAllocationSite(site); |
+ return object; |
+ } else { |
+ return NULL; |
+ } |
+ } |
+}; |
+ |
+ |
+// Fill the marking stack with overflowed objects returned by the given |
+// iterator. Stop when the marking stack is filled or the end of the space |
+// is reached, whichever comes first. |
+template <class T> |
+void MarkCompactCollector::DiscoverGreyObjectsWithIterator(T* it) { |
+ // The caller should ensure that the marking stack is initially not full, |
+ // so that we don't waste effort pointlessly scanning for objects. |
+ DCHECK(!marking_deque()->IsFull()); |
+ |
+ Map* filler_map = heap()->one_pointer_filler_map(); |
+ for (HeapObject* object = it->Next(); object != NULL; object = it->Next()) { |
+ MarkBit markbit = Marking::MarkBitFrom(object); |
+ if ((object->map() != filler_map) && Marking::IsGrey(markbit)) { |
+ Marking::GreyToBlack(markbit); |
+ PushBlack(object); |
+ if (marking_deque()->IsFull()) return; |
+ } |
+ } |
+} |
+ |
+ |
+static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts); |
+ |
+ |
+void MarkCompactCollector::DiscoverGreyObjectsOnPage(MemoryChunk* p) { |
+ DCHECK(!marking_deque()->IsFull()); |
+ DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0); |
+ DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0); |
+ DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0); |
+ DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0); |
+ |
+ for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) { |
+ Address cell_base = it.CurrentCellBase(); |
+ MarkBit::CellType* cell = it.CurrentCell(); |
+ |
+ const MarkBit::CellType current_cell = *cell; |
+ if (current_cell == 0) continue; |
+ |
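+ // Grey objects have both of their mark bits set ("11"), so ANDing the cell
+ // with itself shifted right by one leaves a 1 at the first bit of each grey
+ // pair; e.g. if bits 4 and 5 are set, bit 4 survives in grey_objects. The
+ // next_cell term catches a pair that straddles the cell boundary.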
+ MarkBit::CellType grey_objects; |
+ if (it.HasNext()) { |
+ const MarkBit::CellType next_cell = *(cell + 1); |
+ grey_objects = current_cell & ((current_cell >> 1) | |
+ (next_cell << (Bitmap::kBitsPerCell - 1))); |
+ } else { |
+ grey_objects = current_cell & (current_cell >> 1); |
+ } |
+ |
+ int offset = 0; |
+ while (grey_objects != 0) { |
+ int trailing_zeros = base::bits::CountTrailingZeros32(grey_objects); |
+ grey_objects >>= trailing_zeros; |
+ offset += trailing_zeros; |
+ MarkBit markbit(cell, 1 << offset); |
+ DCHECK(Marking::IsGrey(markbit)); |
+ Marking::GreyToBlack(markbit); |
+ Address addr = cell_base + offset * kPointerSize; |
+ HeapObject* object = HeapObject::FromAddress(addr); |
+ PushBlack(object); |
+ if (marking_deque()->IsFull()) return; |
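+ // Advance past both mark bits of the object that was just processed.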
+ offset += 2; |
+ grey_objects >>= 2; |
+ } |
+ } |
+} |
+ |
+ |
+int MarkCompactCollector::DiscoverAndEvacuateBlackObjectsOnPage( |
+ NewSpace* new_space, NewSpacePage* p) { |
+ DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0); |
+ DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0); |
+ DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0); |
+ DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0); |
+ |
+ int survivors_size = 0;
+ |
+ for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) { |
+ Address cell_base = it.CurrentCellBase(); |
+ MarkBit::CellType* cell = it.CurrentCell(); |
+ |
+ MarkBit::CellType current_cell = *cell; |
+ if (current_cell == 0) continue; |
+ |
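+ // Every marked object here is black ("10"), so each live object contributes
+ // exactly one set bit and CountTrailingZeros32 jumps directly from one live
+ // object to the next.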
+ int offset = 0; |
+ while (current_cell != 0) { |
+ int trailing_zeros = base::bits::CountTrailingZeros32(current_cell); |
+ current_cell >>= trailing_zeros; |
+ offset += trailing_zeros; |
+ Address address = cell_base + offset * kPointerSize; |
+ HeapObject* object = HeapObject::FromAddress(address); |
+ DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object))); |
+ |
+ int size = object->Size(); |
+ survivors_size += size; |
+ |
+ Heap::UpdateAllocationSiteFeedback(object, Heap::RECORD_SCRATCHPAD_SLOT); |
+ |
+ offset += 2; |
+ current_cell >>= 2; |
+ |
+ // TODO(hpayer): Refactor EvacuateObject and call this function instead. |
+ if (heap()->ShouldBePromoted(object->address(), size) && |
+ TryPromoteObject(object, size)) { |
+ continue; |
+ } |
+ |
+ AllocationAlignment alignment = object->RequiredAlignment(); |
+ AllocationResult allocation = new_space->AllocateRaw(size, alignment); |
+ if (allocation.IsRetry()) { |
+ if (!new_space->AddFreshPage()) { |
+ // Shouldn't happen. We are sweeping linearly, and to-space |
+ // has the same number of pages as from-space, so there is |
+ // always room unless we are in an OOM situation. |
+ FatalProcessOutOfMemory("MarkCompactCollector: semi-space copy\n"); |
+ } |
+ allocation = new_space->AllocateRaw(size, alignment); |
+ DCHECK(!allocation.IsRetry()); |
+ } |
+ Object* target = allocation.ToObjectChecked(); |
+ |
+ MigrateObject(HeapObject::cast(target), object, size, NEW_SPACE, nullptr); |
+ if (V8_UNLIKELY(target->IsJSArrayBuffer())) { |
+ heap()->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target)); |
+ } |
+ heap()->IncrementSemiSpaceCopiedObjectSize(size); |
+ } |
+ // Clear the mark bits of the cell that was just processed.
+ *cell = 0;
+ } |
+ return survivors_size; |
+} |
+ |
+ |
+void MarkCompactCollector::DiscoverGreyObjectsInSpace(PagedSpace* space) { |
+ PageIterator it(space); |
+ while (it.has_next()) { |
+ Page* p = it.next(); |
+ DiscoverGreyObjectsOnPage(p); |
+ if (marking_deque()->IsFull()) return; |
+ } |
+} |
+ |
+ |
+void MarkCompactCollector::DiscoverGreyObjectsInNewSpace() { |
+ NewSpace* space = heap()->new_space(); |
+ NewSpacePageIterator it(space->bottom(), space->top()); |
+ while (it.has_next()) { |
+ NewSpacePage* page = it.next(); |
+ DiscoverGreyObjectsOnPage(page); |
+ if (marking_deque()->IsFull()) return; |
+ } |
+} |
+ |
+ |
+bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) { |
+ Object* o = *p; |
+ if (!o->IsHeapObject()) return false; |
+ HeapObject* heap_object = HeapObject::cast(o); |
+ MarkBit mark = Marking::MarkBitFrom(heap_object); |
+ return Marking::IsWhite(mark); |
+} |
+ |
+ |
+bool MarkCompactCollector::IsUnmarkedHeapObjectWithHeap(Heap* heap, |
+ Object** p) { |
+ Object* o = *p; |
+ DCHECK(o->IsHeapObject()); |
+ HeapObject* heap_object = HeapObject::cast(o); |
+ MarkBit mark = Marking::MarkBitFrom(heap_object); |
+ return Marking::IsWhite(mark); |
+} |
+ |
+ |
+void MarkCompactCollector::MarkStringTable(RootMarkingVisitor* visitor) { |
+ StringTable* string_table = heap()->string_table(); |
+ // Mark the string table itself. |
+ MarkBit string_table_mark = Marking::MarkBitFrom(string_table); |
+ if (Marking::IsWhite(string_table_mark)) { |
+ // String table could have already been marked by visiting the handles list. |
+ SetMark(string_table, string_table_mark); |
+ } |
+ // Explicitly mark the prefix. |
+ string_table->IteratePrefix(visitor); |
+ ProcessMarkingDeque(); |
+} |
+ |
+ |
+void MarkCompactCollector::MarkAllocationSite(AllocationSite* site) { |
+ MarkBit mark_bit = Marking::MarkBitFrom(site); |
+ SetMark(site, mark_bit); |
+} |
+ |
+ |
+void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) { |
+ // Mark the heap roots including global variables, stack variables, |
+ // etc., and all objects reachable from them. |
+ heap()->IterateStrongRoots(visitor, VISIT_ONLY_STRONG); |
+ |
+ // Handle the string table specially. |
+ MarkStringTable(visitor); |
+ |
+ // There may be overflowed objects in the heap. Visit them now. |
+ while (marking_deque_.overflowed()) { |
+ RefillMarkingDeque(); |
+ EmptyMarkingDeque(); |
+ } |
+} |
+ |
+ |
+void MarkCompactCollector::MarkImplicitRefGroups( |
+ MarkObjectFunction mark_object) { |
+ List<ImplicitRefGroup*>* ref_groups = |
+ isolate()->global_handles()->implicit_ref_groups(); |
+ |
+ int last = 0; |
+ for (int i = 0; i < ref_groups->length(); i++) { |
+ ImplicitRefGroup* entry = ref_groups->at(i); |
+ DCHECK(entry != NULL); |
+ |
+ if (!IsMarked(*entry->parent)) { |
+ (*ref_groups)[last++] = entry; |
+ continue; |
+ } |
+ |
+ Object*** children = entry->children; |
+ // A parent object is marked, so mark all child heap objects. |
+ for (size_t j = 0; j < entry->length; ++j) { |
+ if ((*children[j])->IsHeapObject()) { |
+ mark_object(heap(), HeapObject::cast(*children[j])); |
+ } |
+ } |
+ |
+ // Once the entire group has been marked, dispose it because it's |
+ // not needed anymore. |
+ delete entry; |
+ } |
+ ref_groups->Rewind(last); |
+} |
+ |
+ |
+// Mark all objects reachable from the objects on the marking stack. |
+// Before: the marking stack contains zero or more heap object pointers. |
+// After: the marking stack is empty, and all objects reachable from the |
+// marking stack have been marked, or are overflowed in the heap. |
+void MarkCompactCollector::EmptyMarkingDeque() { |
+ Map* filler_map = heap_->one_pointer_filler_map(); |
+ while (!marking_deque_.IsEmpty()) { |
+ HeapObject* object = marking_deque_.Pop(); |
+ // Explicitly skip one word fillers. Incremental markbit patterns are |
+ // correct only for objects that occupy at least two words. |
+ Map* map = object->map(); |
+ if (map == filler_map) continue; |
+ |
+ DCHECK(object->IsHeapObject()); |
+ DCHECK(heap()->Contains(object)); |
+ DCHECK(!Marking::IsWhite(Marking::MarkBitFrom(object))); |
+ |
+ MarkBit map_mark = Marking::MarkBitFrom(map); |
+ MarkObject(map, map_mark); |
+ |
+ MarkCompactMarkingVisitor::IterateBody(map, object); |
+ } |
+} |
+ |
+ |
+// Sweep the heap for overflowed objects, clear their overflow bits, and |
+// push them on the marking stack. Stop early if the marking stack fills |
+// before sweeping completes. If sweeping completes, there are no remaining |
+// overflowed objects in the heap so the overflow flag on the marking stack
+// is cleared. |
+void MarkCompactCollector::RefillMarkingDeque() { |
+ isolate()->CountUsage(v8::Isolate::UseCounterFeature::kMarkDequeOverflow); |
+ DCHECK(marking_deque_.overflowed()); |
+ |
+ DiscoverGreyObjectsInNewSpace(); |
+ if (marking_deque_.IsFull()) return; |
+ |
+ DiscoverGreyObjectsInSpace(heap()->old_space()); |
+ if (marking_deque_.IsFull()) return; |
+ |
+ DiscoverGreyObjectsInSpace(heap()->code_space()); |
+ if (marking_deque_.IsFull()) return; |
+ |
+ DiscoverGreyObjectsInSpace(heap()->map_space()); |
+ if (marking_deque_.IsFull()) return; |
+ |
+ LargeObjectIterator lo_it(heap()->lo_space()); |
+ DiscoverGreyObjectsWithIterator(&lo_it); |
+ if (marking_deque_.IsFull()) return; |
+ |
+ marking_deque_.ClearOverflowed(); |
+} |
+ |
+ |
+// Mark all objects reachable (transitively) from objects on the marking |
+// stack. Before: the marking stack contains zero or more heap object |
+// pointers. After: the marking stack is empty and there are no overflowed |
+// objects in the heap. |
+void MarkCompactCollector::ProcessMarkingDeque() { |
+ EmptyMarkingDeque(); |
+ while (marking_deque_.overflowed()) { |
+ RefillMarkingDeque(); |
+ EmptyMarkingDeque(); |
+ } |
+} |
+ |
+ |
+// Mark all objects reachable (transitively) from objects on the marking |
+// stack including references only considered in the atomic marking pause. |
+void MarkCompactCollector::ProcessEphemeralMarking( |
+ ObjectVisitor* visitor, bool only_process_harmony_weak_collections) { |
+ bool work_to_do = true; |
+ DCHECK(marking_deque_.IsEmpty() && !marking_deque_.overflowed()); |
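+ // Iterate to a fixed point: marking the values of weak collections can make
+ // new objects reachable, which in turn can make further ephemeron values
+ // (and object-group children) live.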
+ while (work_to_do) { |
+ if (!only_process_harmony_weak_collections) { |
+ isolate()->global_handles()->IterateObjectGroups( |
+ visitor, &IsUnmarkedHeapObjectWithHeap); |
+ MarkImplicitRefGroups(&MarkCompactMarkingVisitor::MarkObject); |
+ } |
+ ProcessWeakCollections(); |
+ work_to_do = !marking_deque_.IsEmpty(); |
+ ProcessMarkingDeque(); |
+ } |
+} |
+ |
+ |
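+// Keep the code of the topmost optimized frame alive by visiting its body,
+// unless that code can be deoptimized at the current pc anyway. The walk
+// stops at the first (non-optimized) JavaScript frame.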
+void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) { |
+ for (StackFrameIterator it(isolate(), isolate()->thread_local_top()); |
+ !it.done(); it.Advance()) { |
+ if (it.frame()->type() == StackFrame::JAVA_SCRIPT) { |
+ return; |
+ } |
+ if (it.frame()->type() == StackFrame::OPTIMIZED) { |
+ Code* code = it.frame()->LookupCode(); |
+ if (!code->CanDeoptAt(it.frame()->pc())) { |
+ code->CodeIterateBody(visitor); |
+ } |
+ ProcessMarkingDeque(); |
+ return; |
+ } |
+ } |
+} |
+ |
+ |
+void MarkCompactCollector::RetainMaps() { |
+ if (heap()->ShouldReduceMemory() || heap()->ShouldAbortIncrementalMarking() || |
+ FLAG_retain_maps_for_n_gc == 0) { |
+ // Do not retain dead maps if the flag disables it, or if there is
+ // - memory pressure (reduce_memory_footprint_), or
+ // - a GC requested by tests or dev-tools (abort_incremental_marking_).
+ return; |
+ } |
+ |
+ ArrayList* retained_maps = heap()->retained_maps(); |
+ int length = retained_maps->Length(); |
+ int new_length = 0; |
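+ // retained_maps stores pairs: the WeakCell holding the map at index i and
+ // the map's remaining age as a Smi at index i + 1, hence the stride of 2.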
+ for (int i = 0; i < length; i += 2) { |
+ DCHECK(retained_maps->Get(i)->IsWeakCell()); |
+ WeakCell* cell = WeakCell::cast(retained_maps->Get(i)); |
+ if (cell->cleared()) continue; |
+ int age = Smi::cast(retained_maps->Get(i + 1))->value(); |
+ int new_age; |
+ Map* map = Map::cast(cell->value()); |
+ MarkBit map_mark = Marking::MarkBitFrom(map); |
+ if (Marking::IsWhite(map_mark)) { |
+ if (age == 0) { |
+ // The map has aged. Do not retain this map. |
+ continue; |
+ } |
+ Object* constructor = map->GetConstructor(); |
+ if (!constructor->IsHeapObject() || Marking::IsWhite(Marking::MarkBitFrom( |
+ HeapObject::cast(constructor)))) { |
+ // The constructor is dead, no new objects with this map can |
+ // be created. Do not retain this map. |
+ continue; |
+ } |
+ Object* prototype = map->prototype(); |
+ if (prototype->IsHeapObject() && |
+ Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(prototype)))) { |
+ // The prototype is not marked, age the map. |
+ new_age = age - 1; |
+ } else { |
+ // Both the prototype and the constructor are marked; this map only keeps
+ // the transition tree alive, not JSObjects. Do not age the map.
+ new_age = age; |
+ } |
+ MarkObject(map, map_mark); |
+ } else { |
+ new_age = FLAG_retain_maps_for_n_gc; |
+ } |
+ if (i != new_length) { |
+ retained_maps->Set(new_length, cell); |
+ Object** slot = retained_maps->Slot(new_length); |
+ RecordSlot(retained_maps, slot, cell); |
+ retained_maps->Set(new_length + 1, Smi::FromInt(new_age)); |
+ } else if (new_age != age) { |
+ retained_maps->Set(new_length + 1, Smi::FromInt(new_age)); |
+ } |
+ new_length += 2; |
+ } |
+ Object* undefined = heap()->undefined_value(); |
+ for (int i = new_length; i < length; i++) { |
+ retained_maps->Clear(i, undefined); |
+ } |
+ if (new_length != length) retained_maps->SetLength(new_length); |
+ ProcessMarkingDeque(); |
+} |
+ |
+ |
+void MarkCompactCollector::EnsureMarkingDequeIsReserved() { |
+ DCHECK(!marking_deque_.in_use()); |
+ if (marking_deque_memory_ == NULL) { |
+ marking_deque_memory_ = new base::VirtualMemory(kMaxMarkingDequeSize); |
+ marking_deque_memory_committed_ = 0; |
+ } |
+ if (marking_deque_memory_ == NULL) { |
+ V8::FatalProcessOutOfMemory("EnsureMarkingDequeIsReserved"); |
+ } |
+} |
+ |
+ |
+void MarkCompactCollector::EnsureMarkingDequeIsCommitted(size_t max_size) { |
+ // If the marking deque is too small, we try to allocate a bigger one. |
+ // If that fails, make do with a smaller one. |
+ CHECK(!marking_deque_.in_use()); |
+ for (size_t size = max_size; size >= kMinMarkingDequeSize; size >>= 1) { |
+ base::VirtualMemory* memory = marking_deque_memory_; |
+ size_t currently_committed = marking_deque_memory_committed_; |
+ |
+ if (currently_committed == size) return; |
+ |
+ if (currently_committed > size) { |
+ bool success = marking_deque_memory_->Uncommit( |
+ reinterpret_cast<Address>(marking_deque_memory_->address()) + size, |
+ currently_committed - size); |
+ if (success) { |
+ marking_deque_memory_committed_ = size; |
+ return; |
+ } |
+ UNREACHABLE(); |
+ } |
+ |
+ bool success = memory->Commit( |
+ reinterpret_cast<Address>(memory->address()) + currently_committed, |
+ size - currently_committed, |
+ false); // Not executable. |
+ if (success) { |
+ marking_deque_memory_committed_ = size; |
+ return; |
+ } |
+ } |
+ V8::FatalProcessOutOfMemory("EnsureMarkingDequeIsCommitted"); |
+} |
+ |
+ |
+void MarkCompactCollector::InitializeMarkingDeque() { |
+ DCHECK(!marking_deque_.in_use()); |
+ DCHECK(marking_deque_memory_committed_ > 0); |
+ Address addr = static_cast<Address>(marking_deque_memory_->address()); |
+ size_t size = marking_deque_memory_committed_; |
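+ // With --force-marking-deque-overflows the deque is made artificially small
+ // so that the overflow and refill path is exercised.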
+ if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize; |
+ marking_deque_.Initialize(addr, addr + size); |
+} |
+ |
+ |
+void MarkingDeque::Initialize(Address low, Address high) { |
+ DCHECK(!in_use_); |
+ HeapObject** obj_low = reinterpret_cast<HeapObject**>(low); |
+ HeapObject** obj_high = reinterpret_cast<HeapObject**>(high); |
+ array_ = obj_low; |
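+ // Round the capacity down to a power of two so that indices can wrap with a
+ // simple bitwise AND; with 1024 slots, for example, mask_ is 1023 and
+ // (top_ + 1) & mask_ wraps around to 0.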
+ mask_ = base::bits::RoundDownToPowerOfTwo32( |
+ static_cast<uint32_t>(obj_high - obj_low)) - |
+ 1; |
+ top_ = bottom_ = 0; |
+ overflowed_ = false; |
+ in_use_ = true; |
+} |
+ |
+ |
+void MarkingDeque::Uninitialize(bool aborting) { |
+ if (!aborting) { |
+ DCHECK(IsEmpty()); |
+ DCHECK(!overflowed_); |
+ } |
+ DCHECK(in_use_); |
+ top_ = bottom_ = 0xdecbad; |
+ in_use_ = false; |
+} |
+ |
+ |
+void MarkCompactCollector::MarkLiveObjects() { |
+ GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_MARK); |
+ double start_time = 0.0; |
+ if (FLAG_print_cumulative_gc_stat) { |
+ start_time = base::OS::TimeCurrentMillis(); |
+ } |
+ // The recursive GC marker detects when it is nearing stack overflow, |
+ // and switches to a different marking system. JS interrupts interfere |
+ // with the C stack limit check. |
+ PostponeInterruptsScope postpone(isolate()); |
+ |
+ { |
+ GCTracer::Scope gc_scope(heap()->tracer(), |
+ GCTracer::Scope::MC_MARK_FINISH_INCREMENTAL); |
+ IncrementalMarking* incremental_marking = heap_->incremental_marking(); |
+ if (was_marked_incrementally_) { |
+ incremental_marking->Finalize(); |
+ } else { |
+ // Abort any pending incremental activities, e.g. incremental sweeping.
+ incremental_marking->Stop(); |
+ if (marking_deque_.in_use()) { |
+ marking_deque_.Uninitialize(true); |
+ } |
+ } |
+ } |
+ |
+#ifdef DEBUG |
+ DCHECK(state_ == PREPARE_GC); |
+ state_ = MARK_LIVE_OBJECTS; |
+#endif |
+ |
+ EnsureMarkingDequeIsCommittedAndInitialize( |
+ MarkCompactCollector::kMaxMarkingDequeSize); |
+ |
+ { |
+ GCTracer::Scope gc_scope(heap()->tracer(), |
+ GCTracer::Scope::MC_MARK_PREPARE_CODE_FLUSH); |
+ PrepareForCodeFlushing(); |
+ } |
+ |
+ RootMarkingVisitor root_visitor(heap()); |
+ |
+ { |
+ GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_MARK_ROOT); |
+ MarkRoots(&root_visitor); |
+ } |
+ |
+ { |
+ GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_MARK_TOPOPT); |
+ ProcessTopOptimizedFrame(&root_visitor); |
+ } |
+ |
+ // Retaining dying maps should happen before or during ephemeral marking |
+ // because a map could keep the key of an ephemeron alive. Note that map |
+ // aging is imprecise: maps that are kept alive only by ephemerons will age. |
+ { |
+ GCTracer::Scope gc_scope(heap()->tracer(), |
+ GCTracer::Scope::MC_MARK_RETAIN_MAPS); |
+ RetainMaps(); |
+ } |
+ |
+ { |
+ GCTracer::Scope gc_scope(heap()->tracer(), |
+ GCTracer::Scope::MC_MARK_WEAK_CLOSURE); |
+ |
+ // At this point, objects reachable from the roots are marked, while
+ // unreachable objects are still unmarked. Mark objects reachable due to
+ // host-application-specific logic or through Harmony weak maps.
+ ProcessEphemeralMarking(&root_visitor, false); |
+ |
+ // The objects reachable from the roots, weak maps or object groups |
+ // are marked. Objects pointed to only by weak global handles cannot be |
+ // immediately reclaimed. Instead, we have to mark them as pending and mark |
+ // objects reachable from them. |
+ // |
+ // First we identify nonlive weak handles and mark them as pending |
+ // destruction. |
+ heap()->isolate()->global_handles()->IdentifyWeakHandles( |
+ &IsUnmarkedHeapObject); |
+ // Then we mark the objects. |
+ heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor); |
+ ProcessMarkingDeque(); |
+ |
+ // Repeat Harmony weak maps marking to mark unmarked objects reachable from |
+ // the weak roots we just marked as pending destruction. |
+ // |
+ // We only process Harmony weak collections here, as all object groups have
+ // been fully processed and no weakly reachable node can discover new object
+ // groups.
+ ProcessEphemeralMarking(&root_visitor, true); |
+ } |
+ |
+ AfterMarking(); |
+ |
+ if (FLAG_print_cumulative_gc_stat) { |
+ heap_->tracer()->AddMarkingTime(base::OS::TimeCurrentMillis() - start_time); |
+ } |
+} |
+ |
+ |
+void MarkCompactCollector::AfterMarking() { |
+ { |
+ GCTracer::Scope gc_scope(heap()->tracer(), |
+ GCTracer::Scope::MC_MARK_STRING_TABLE); |
+ |
+ // Prune the string table removing all strings only pointed to by the |
+ // string table. Cannot use string_table() here because the string |
+ // table is marked. |
+ StringTable* string_table = heap()->string_table(); |
+ InternalizedStringTableCleaner internalized_visitor(heap()); |
+ string_table->IterateElements(&internalized_visitor); |
+ string_table->ElementsRemoved(internalized_visitor.PointersRemoved()); |
+ |
+ ExternalStringTableCleaner external_visitor(heap()); |
+ heap()->external_string_table_.Iterate(&external_visitor); |
+ heap()->external_string_table_.CleanUp(); |
+ } |
+ |
+ { |
+ GCTracer::Scope gc_scope(heap()->tracer(), |
+ GCTracer::Scope::MC_MARK_WEAK_REFERENCES); |
+ |
+ // Process the weak references. |
+ MarkCompactWeakObjectRetainer mark_compact_object_retainer; |
+ heap()->ProcessAllWeakReferences(&mark_compact_object_retainer); |
+ } |
+ |
+ { |
+ GCTracer::Scope gc_scope(heap()->tracer(), |
+ GCTracer::Scope::MC_MARK_GLOBAL_HANDLES); |
+ |
+ // Remove object groups after marking phase. |
+ heap()->isolate()->global_handles()->RemoveObjectGroups(); |
+ heap()->isolate()->global_handles()->RemoveImplicitRefGroups(); |
+ } |
+ |
+ // Flush code from collected candidates. |
+ if (is_code_flushing_enabled()) { |
+ GCTracer::Scope gc_scope(heap()->tracer(), |
+ GCTracer::Scope::MC_MARK_CODE_FLUSH); |
+ code_flusher_->ProcessCandidates(); |
+ } |
+ |
+ if (FLAG_track_gc_object_stats) { |
+ if (FLAG_trace_gc_object_stats) { |
+ heap()->object_stats_->TraceObjectStats(); |
+ } |
+ heap()->object_stats_->CheckpointObjectStats(); |
} |
} |