Index: src/heap/mark-compact.cc |
diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc |
index 847bdac1263db4c9de13aa624e934bb5edcbcdba..1ffd6c80654eb28ed039dd353311831fc1d0c219 100644 |
--- a/src/heap/mark-compact.cc |
+++ b/src/heap/mark-compact.cc |
@@ -50,6 +50,8 @@ MarkCompactCollector::MarkCompactCollector(Heap* heap) |
evacuation_(false), |
migration_slots_buffer_(NULL), |
heap_(heap), |
+ marking_deque_memory_(NULL), |
+ marking_deque_memory_committed_(false), |
code_flusher_(NULL), |
have_code_to_deoptimize_(false) { |
} |
@@ -233,7 +235,10 @@ void MarkCompactCollector::SetUp() { |
} |
-void MarkCompactCollector::TearDown() { AbortCompaction(); } |
+void MarkCompactCollector::TearDown() { |
+ AbortCompaction(); |
+ delete marking_deque_memory_; |
+} |
void MarkCompactCollector::AddEvacuationCandidate(Page* p) { |
@@ -252,7 +257,7 @@ static void TraceFragmentation(PagedSpace* space) { |
} |
-bool MarkCompactCollector::StartCompaction(CompactionMode mode) { |
+bool MarkCompactCollector::StartCompaction(MarkingMode mode) { |
if (!compacting_) { |
DCHECK(evacuation_candidates_.length() == 0); |
@@ -264,8 +269,8 @@ bool MarkCompactCollector::StartCompaction(CompactionMode mode) { |
CollectEvacuationCandidates(heap()->old_pointer_space()); |
CollectEvacuationCandidates(heap()->old_data_space()); |
- if (FLAG_compact_code_space && (mode == NON_INCREMENTAL_COMPACTION || |
- FLAG_incremental_code_compaction)) { |
+ if (FLAG_compact_code_space && |
+ (mode == NON_INCREMENTAL || FLAG_incremental_code_compaction)) { |
CollectEvacuationCandidates(heap()->code_space()); |
} else if (FLAG_trace_fragmentation) { |
TraceFragmentation(heap()->code_space()); |
@@ -835,7 +840,7 @@ void MarkCompactCollector::Prepare() { |
// Don't start compaction if we are in the middle of incremental |
// marking cycle. We did not collect any slots. |
if (!FLAG_never_compact && !was_marked_incrementally_) { |
- StartCompaction(NON_INCREMENTAL_COMPACTION); |
+ StartCompaction(NON_INCREMENTAL); |
} |
PagedSpaces spaces(heap()); |
@@ -1680,7 +1685,7 @@ class RootMarkingVisitor : public ObjectVisitor { |
// Mark all the objects reachable from the map and body. May leave |
// overflowed objects in the heap. |
- collector_->EmptyMarkingDeque(); |
+ collector_->EmptyMarkingDeque<MarkCompactCollector::NON_INCREMENTAL>(); |
} |
MarkCompactCollector* collector_; |
@@ -1957,7 +1962,7 @@ void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) { |
// There may be overflowed objects in the heap. Visit them now. |
while (marking_deque_.overflowed()) { |
RefillMarkingDeque(); |
- EmptyMarkingDeque(); |
+ EmptyMarkingDeque<MarkCompactCollector::NON_INCREMENTAL>(); |
} |
} |
@@ -2008,18 +2013,34 @@ void MarkCompactCollector::MarkWeakObjectToCodeTable() { |
// Before: the marking stack contains zero or more heap object pointers. |
// After: the marking stack is empty, and all objects reachable from the |
// marking stack have been marked, or are overflowed in the heap. |
+template void MarkCompactCollector::EmptyMarkingDeque< |
+ MarkCompactCollector::INCREMENTAL>(); |
+template void MarkCompactCollector::EmptyMarkingDeque< |
+ MarkCompactCollector::NON_INCREMENTAL>(); |
+ |
+template <MarkCompactCollector::MarkingMode mode> |
void MarkCompactCollector::EmptyMarkingDeque() { |
+ Map* filler_map = heap_->one_pointer_filler_map(); |
while (!marking_deque_.IsEmpty()) { |
HeapObject* object = marking_deque_.Pop(); |
+ // Explicitly skip one word fillers. Incremental markbit patterns are |
+ // correct only for objects that occupy at least two words. |
+ Map* map = object->map(); |
+ if (map == filler_map) continue; |
+ |
DCHECK(object->IsHeapObject()); |
DCHECK(heap()->Contains(object)); |
- DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object))); |
+ DCHECK(!Marking::IsWhite(Marking::MarkBitFrom(object))); |
[Code-review thread — not part of the patch]
jochen (gone - plz use gerrit), 2014/12/01 14:32:04:
    why was this inverted?
Hannes Payer (out of office), 2014/12/01 14:53:23:
    Because it can be grey or black.
|
- Map* map = object->map(); |
MarkBit map_mark = Marking::MarkBitFrom(map); |
MarkObject(map, map_mark); |
- MarkCompactMarkingVisitor::IterateBody(map, object); |
+ if (mode == NON_INCREMENTAL) { |
+ MarkCompactMarkingVisitor::IterateBody(map, object); |
+ } else { |
+ heap_->incremental_marking()->VisitObject(map, object, |
+ object->SizeFromMap(map)); |
+ } |
} |
} |
@@ -2068,10 +2089,10 @@ void MarkCompactCollector::RefillMarkingDeque() { |
// pointers. After: the marking stack is empty and there are no overflowed |
// objects in the heap. |
void MarkCompactCollector::ProcessMarkingDeque() { |
- EmptyMarkingDeque(); |
+ EmptyMarkingDeque<MarkCompactCollector::NON_INCREMENTAL>(); |
while (marking_deque_.overflowed()) { |
RefillMarkingDeque(); |
- EmptyMarkingDeque(); |
+ EmptyMarkingDeque<MarkCompactCollector::NON_INCREMENTAL>(); |
} |
} |
@@ -2110,6 +2131,43 @@ void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) { |
} |
+void MarkCompactCollector::EnsureMarkingDequeIsCommittedAndInitialize() { |
+ if (marking_deque_memory_ == NULL) { |
+ marking_deque_memory_ = new base::VirtualMemory(4 * MB); |
+ } |
+ if (!marking_deque_memory_committed_) { |
+ bool success = marking_deque_memory_->Commit( |
+ reinterpret_cast<Address>(marking_deque_memory_->address()), |
+ marking_deque_memory_->size(), |
+ false); // Not executable. |
+ CHECK(success); |
+ marking_deque_memory_committed_ = true; |
+ InitializeMarkingDeque(); |
+ } |
+} |
+ |
+ |
+void MarkCompactCollector::InitializeMarkingDeque() { |
+ if (marking_deque_memory_committed_) { |
+ Address addr = static_cast<Address>(marking_deque_memory_->address()); |
+ size_t size = marking_deque_memory_->size(); |
+ if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize; |
+ marking_deque_.Initialize(addr, addr + size); |
+ } |
+} |
+ |
+ |
+void MarkCompactCollector::UncommitMarkingDeque() { |
+ if (marking_deque_memory_committed_) { |
+ bool success = marking_deque_memory_->Uncommit( |
+ reinterpret_cast<Address>(marking_deque_memory_->address()), |
+ marking_deque_memory_->size()); |
+ CHECK(success); |
+ marking_deque_memory_committed_ = false; |
+ } |
+} |
+ |
+ |
void MarkCompactCollector::MarkLiveObjects() { |
GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_MARK); |
double start_time = 0.0; |
@@ -2121,42 +2179,21 @@ void MarkCompactCollector::MarkLiveObjects() { |
// with the C stack limit check. |
PostponeInterruptsScope postpone(isolate()); |
- bool incremental_marking_overflowed = false; |
IncrementalMarking* incremental_marking = heap_->incremental_marking(); |
if (was_marked_incrementally_) { |
- // Finalize the incremental marking and check whether we had an overflow. |
- // Both markers use grey color to mark overflowed objects so |
- // non-incremental marker can deal with them as if overflow |
- // occured during normal marking. |
- // But incremental marker uses a separate marking deque |
- // so we have to explicitly copy its overflow state. |
incremental_marking->Finalize(); |
- incremental_marking_overflowed = |
- incremental_marking->marking_deque()->overflowed(); |
- incremental_marking->marking_deque()->ClearOverflowed(); |
} else { |
// Abort any pending incremental activities e.g. incremental sweeping. |
incremental_marking->Abort(); |
+ InitializeMarkingDeque(); |
} |
#ifdef DEBUG |
DCHECK(state_ == PREPARE_GC); |
state_ = MARK_LIVE_OBJECTS; |
#endif |
- // The to space contains live objects, a page in from space is used as a |
- // marking stack. |
- Address marking_deque_start = heap()->new_space()->FromSpacePageLow(); |
- Address marking_deque_end = heap()->new_space()->FromSpacePageHigh(); |
- if (FLAG_force_marking_deque_overflows) { |
- marking_deque_end = marking_deque_start + 64 * kPointerSize; |
- } |
- marking_deque_.Initialize(marking_deque_start, marking_deque_end); |
- DCHECK(!marking_deque_.overflowed()); |
- if (incremental_marking_overflowed) { |
- // There are overflowed objects left in the heap after incremental marking. |
- marking_deque_.SetOverflowed(); |
- } |
+ EnsureMarkingDequeIsCommittedAndInitialize(); |
PrepareForCodeFlushing(); |
@@ -2210,7 +2247,7 @@ void MarkCompactCollector::MarkLiveObjects() { |
heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor); |
while (marking_deque_.overflowed()) { |
RefillMarkingDeque(); |
- EmptyMarkingDeque(); |
+ EmptyMarkingDeque<MarkCompactCollector::NON_INCREMENTAL>(); |
} |
// Repeat host application specific and Harmony weak maps marking to |