Index: src/mark-compact.cc |
diff --git a/src/mark-compact.cc b/src/mark-compact.cc |
index 26f88cfe611ca1bc95cbda7e995489aaf45ab89a..fc261aeedbdad651f66f58367422c79c3f39ee68 100644 |
--- a/src/mark-compact.cc |
+++ b/src/mark-compact.cc |
@@ -46,7 +46,6 @@ bool MarkCompactCollector::force_compaction_ = false; |
bool MarkCompactCollector::compacting_collection_ = false; |
bool MarkCompactCollector::compact_on_next_gc_ = false; |
-int MarkCompactCollector::previous_marked_count_ = 0; |
GCTracer* MarkCompactCollector::tracer_ = NULL; |
@@ -65,6 +64,13 @@ int MarkCompactCollector::live_cell_objects_size_ = 0; |
int MarkCompactCollector::live_lo_objects_size_ = 0; |
#endif |
+// Assert invariant that must hold if marking is done by a single thread. |
+#define MARKING_INVARIANT_ST(cond) \ |
+ ASSERT(MarkCompactCollector::NumberOfMarkers() != 1 || (cond)) |
+ |
+// Assert invariant that must hold if marking is done by multiple threads. |
+#define MARKING_INVARIANT_MT(cond) \ |
+ ASSERT(MarkCompactCollector::NumberOfMarkers() == 1 || (cond)) |
void MarkCompactCollector::CollectGarbage() { |
// Make sure that Prepare() has been called. The individual steps below will |
@@ -98,10 +104,6 @@ void MarkCompactCollector::CollectGarbage() { |
Finish(); |
- // Save the count of marked objects remaining after the collection and |
- // null out the GC tracer. |
- previous_marked_count_ = tracer_->marked_count(); |
- ASSERT(previous_marked_count_ == 0); |
tracer_ = NULL; |
} |
@@ -182,6 +184,9 @@ void MarkCompactCollector::Finish() { |
old_gen_recoverable > kFragmentationAllowed) { |
compact_on_next_gc_ = true; |
} |
+ |
+ MARKING_INVARIANT_ST(tracer_->marked_count() == 0); |
+ MARKING_INVARIANT_MT(tracer_->marked_count() >= 0); |
} |
@@ -213,7 +218,755 @@ void MarkCompactCollector::Finish() { |
// and continue with marking. This process repeats until all reachable |
// objects have been marked. |
-static MarkingStack marking_stack; |
+// Lock-free, wait-free, thread-safe, fixed-size deque with work-stealing |
+// support. |
+// |
+// Each marker thread owns one such deque. The owner uses the methods |
+// PushBottom and PopBottom to populate and consume the deque. Other threads |
+// can use the PopTop method to steal objects from the deque. Synchronization |
+// is required only when popping the last element or stealing from the deque. |
+// |
+// This deque was first described in "Thread Scheduling for Multiprogrammed |
+// Multiprocessors" by Arora et al. |
+// |
+// This deque does not own any fixed backing store; instead, on each full GC |
+// a part of the from space is attached to it as a backing store. |
+class Deque { |
+ public: |
+ |
+  // Prepare the deque for use in a GC cycle: set the memory region between |
+  // low and high as its backing store. |
+ void Initialize(Address low, Address high) { |
+ base_ = reinterpret_cast<HeapObject* volatile*>(low); |
+ size_ = (high - low) / sizeof(HeapObject*); |
+ |
+ bot_ = 0; |
+ tagged_top_ = TaggedTop(0, 0).encode(); |
+ } |
+ |
+ inline void PushBottom(HeapObject* obj) { |
+ uint32_t curr_bot = bot_; |
+ base_[curr_bot++] = obj; |
+ bot_ = curr_bot; |
+ } |
+ |
+ inline HeapObject* PopBottom() { |
+ uint32_t curr_bot = bot_; |
+ |
+ if (curr_bot == 0) return NULL; |
+ |
+ curr_bot--; |
+ |
+ bot_ = curr_bot; |
+ |
+ HeapObject* obj = base_[curr_bot]; |
+ TaggedTop old_tagged_top(tagged_top_); |
+ |
+ if (curr_bot > old_tagged_top.top()) return obj; |
+ |
+ bot_ = 0; |
+ |
+    TaggedTop new_tagged_top(0, old_tagged_top.tag() + 1); |
+ |
+ if (curr_bot == old_tagged_top.top()) { |
+ if (AtomicCompareAndSwap(&tagged_top_, |
+ old_tagged_top.encode(), |
+ new_tagged_top.encode())) { |
+ return obj; |
+ } |
+ } |
+ tagged_top_ = new_tagged_top.encode(); |
+ return NULL; |
+ } |
+ |
+ inline HeapObject* PopTop() { |
+ TaggedTop old_tagged_top(tagged_top_); |
+ uint32_t curr_bot = bot_; |
+ |
+ if (curr_bot <= old_tagged_top.top()) return NULL; |
+ HeapObject* obj = base_[old_tagged_top.top()]; |
+ |
+ TaggedTop new_tagged_top(old_tagged_top.top() + 1, old_tagged_top.tag()); |
+ |
+ if (AtomicCompareAndSwap(&tagged_top_, |
+ old_tagged_top.encode(), |
+ new_tagged_top.encode())) { |
+ return obj; |
+ } |
+ |
+ return NULL; |
+ } |
+ |
+ inline bool IsFull() { return bot_ == size_; } |
+ inline bool IsEmpty() { return bot_ == 0; } |
+ |
+ private: |
+ class TaggedTop { |
+ public: |
+ explicit TaggedTop(AtomicWord encoded) |
+ : top_(static_cast<uint32_t>(encoded & kEncodedTopMask)), |
+ tag_(static_cast<uint32_t>(encoded >> kEncodedTagShift)) { |
+ ASSERT(encode() == encoded); |
+ } |
+ |
+ TaggedTop(uint32_t top, uint32_t tag) |
+ : top_(top), tag_(tag) { |
+ } |
+ |
+ |
+ TaggedTop(const TaggedTop& val) : top_(val.top_), tag_(val.tag_) { } |
+ |
+ inline uint32_t top() { return top_; } |
+ |
+ inline uint32_t tag() { return tag_; } |
+ |
+ inline AtomicWord encode() const { |
+ return (static_cast<AtomicWord>(top_) & kEncodedTopMask) | |
+ (static_cast<AtomicWord>(tag_) << kEncodedTagShift); |
+ } |
+ |
+ |
+ private: |
+#ifdef V8_TARGET_ARCH_X64 |
+ static const int kTopBits = 32; |
+#else |
+ static const int kTopBits = 21; |
+#endif |
+ |
+ static const AtomicWord kEncodedTopMask = |
+ (static_cast<AtomicWord>(1) << kTopBits) - 1; |
+ |
+ static const int kEncodedTagShift = kTopBits; |
+ |
+ uint32_t top_; |
+ uint32_t tag_; |
+ }; |
+ |
+ volatile AtomicWord tagged_top_; |
+ volatile Atomic32 bot_; |
+ |
+ HeapObject* volatile* base_; |
+ Atomic32 size_; |
+}; |
+ |
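An illustrative aside, not part of the patch: a minimal sketch of how the |
owner and a thief might drive the Deque above. The helper names OwnerLoop |
and TryStealFrom, and the body-scanning placeholder, are hypothetical. |
 |
// Owner thread: LIFO processing of its own grey objects. |
void OwnerLoop(Deque* q) { |
  HeapObject* obj; |
  while ((obj = q->PopBottom()) != NULL) { |
    // Scan obj's body here; newly discovered grey objects go back onto |
    // the bottom of the same deque via q->PushBottom(...). |
  } |
} |
 |
// Thief thread: FIFO stealing from the top end. The tag half of |
// tagged_top_ is bumped whenever the deque drains, so a thief holding a |
// stale top value cannot win the compare-and-swap against a reused top |
// index; this is what prevents the ABA problem. |
HeapObject* TryStealFrom(Deque* victim) { |
  return victim->PopTop();  // NULL if the deque is empty or the CAS lost. |
} |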
+class Marker; |
+ |
+// Visitor class for marking heap roots. |
+class RootMarkingVisitor : public ObjectVisitor { |
+ public: |
+ explicit RootMarkingVisitor(Marker* marker) : marker_(marker) {} |
+ |
+ void VisitPointer(Object** p) { |
+ MarkObjectByPointer(p); |
+ } |
+ |
+ void VisitPointers(Object** start, Object** end) { |
+ for (Object** p = start; p < end; p++) MarkObjectByPointer(p); |
+ } |
+ |
+ private: |
+ void MarkObjectByPointer(Object** p); |
+ |
+ Marker* marker_; |
+}; |
+ |
+// Random number generator using George Marsaglia's MWC algorithm. |
+class Random { |
+ public: |
+ Random() : hi_(0), lo_(0) {} |
+ |
+  uint32_t Next() { |
+ // Initialize seed using the system random(). If one of the seeds |
+ // should ever become zero again, or if random() returns zero, we |
+ // avoid getting stuck with zero bits in hi or lo by re-initializing |
+ // them on demand. |
+ if (hi_ == 0) hi_ = V8::Random(); |
+ if (lo_ == 0) lo_ = V8::Random(); |
+ |
+ // Mix the bits. |
+ hi_ = 36969 * (hi_ & 0xFFFF) + (hi_ >> 16); |
+ lo_ = 18273 * (lo_ & 0xFFFF) + (lo_ >> 16); |
+ return (hi_ << 16) + (lo_ & 0xFFFF); |
+ } |
+ |
+ private: |
+ uint32_t hi_; |
+ uint32_t lo_; |
+}; |
+ |
+// This class encapsulates the victim-selection strategy for work stealing. |
+class VictimSelector { |
+ public: |
+  VictimSelector(int owner_id, int count) |
+      : owner_id_(owner_id), count_(count) { |
+ } |
+ |
+ int Next() { |
+    // When there are only two markers, the only possible victim is the |
+    // other marker. |
+ if (count_ == 2) return 1 - owner_id_; |
+ |
+ return random_.Next() % count_; |
+ } |
+ |
+ private: |
+ int owner_id_; |
+ int count_; |
+ |
+ Random random_; |
+}; |
+ |
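An illustrative aside, not part of the patch: how Random and VictimSelector |
combine when a marker looks for a steal victim, mirroring what |
Marker::TryStealingObject does below; marker_id, max_attempts, and markers |
are hypothetical stand-ins for the corresponding Marker fields. |
 |
VictimSelector victim(marker_id, MarkCompactCollector::NumberOfMarkers()); |
for (int attempt = 0; attempt < max_attempts; attempt++) { |
  int candidate = victim.Next();  // Uniform over all markers; with exactly |
                                  // two markers, always the other one. |
  if (candidate != marker_id) { |
    // Try to steal from the deque of markers[candidate] via PopTop(). |
  } |
} |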
+// This class represents a worker performing the actual marking of heap |
+// objects. One of the markers (returned by the method Marker::Sequential()) |
+// is considered to be the main one. Its methods can only be called from the |
+// main thread. This marker's id is equal to kMainThreadMarkerId. |
+// Only this marker is required for the marking phase to succeed. |
+// Other markers are considered auxiliary. Each of the auxiliary markers |
+// owns its own thread. |
+class Marker : public Thread { |
+ public: |
+ explicit Marker(int id); |
+ |
+  // Try marking the given unmarked object. If marking succeeds, push the |
+  // object onto the deque owned by this marker. |
+  // There is no synchronization and there are no memory barriers in |
+  // HeapObject::SetMark, so two threads competing for the same object might |
+  // both succeed in claiming it. |
+ INLINE(void ClaimObject(HeapObject* object)) { |
+ if (SetMark(object)) Push(object); |
+ ASSERT(object->IsMarked()); |
+ } |
+ |
+  // Push the given object onto the deque owned by this marker. |
+  // If there is no space left, mark the object as overflowed and |
+  // notify other markers about the presence of overflowed objects in the |
+  // heap. This method is not thread safe. It is intended to be used either |
+  // by the marker itself or when no markers are running. |
+ INLINE(void Push(HeapObject* object)) { |
+ if (!q_.IsFull()) { |
+ q_.PushBottom(object); |
+ } else { |
+ object->SetOverflow(); |
+ SetOverflowedState(); |
+ } |
+ } |
+ |
+  // Try marking the given object. |
+  // There is no synchronization and there are no memory barriers in |
+  // HeapObject::SetMark, so two threads competing for the same object might |
+  // both succeed in claiming it. |
+ MUST_USE_RESULT INLINE(bool SetMark(HeapObject* obj)) { |
+ if (obj->SetMark()) { |
+#ifdef DEBUG |
+ IncrementMarkedCount(); |
+ MarkCompactCollector::UpdateLiveObjectCount(obj); |
+#endif |
+ return true; |
+ } |
+ return false; |
+ } |
+ |
+  // Try marking the given object exclusively. If several threads try to |
+  // mark the same unmarked object exclusively, only one will succeed. |
+ MUST_USE_RESULT INLINE(bool SetMarkExclusively(HeapObject* obj)) { |
+ if (obj->SetMarkExclusively()) { |
+#ifdef DEBUG |
+ IncrementMarkedCount(); |
+ MarkCompactCollector::UpdateLiveObjectCount(obj); |
+#endif |
+ return true; |
+ } |
+ return false; |
+ } |
+ |
+  // Scan the body of the object for pointers. |
+ void ProcessObject(HeapObject* object); |
+ |
+ // Transitively mark all objects reachable from those pushed into the deque. |
+ // Might leave overflowed objects in the heap. |
+ void TransitiveClosure(); |
+ |
+  // Scan the heap for objects marked as overflowed and mark objects |
+  // reachable from them until there are no overflowed objects left in the |
+  // heap. |
+ void ProcessOverflowedObjects(); |
+ |
+ // Mark objects reachable from roots. This method is thread safe. |
+ // All roots are separated into several groups. Different markers |
+ // compete for these groups. Markers that do not have work try |
+ // stealing it from those markers that have it. |
+ // |
+ // When this method returns the following holds: |
+  // - all objects reachable from strong roots are marked; |
+  // - there are no overflowed objects in the heap; |
+  // - all markers have empty deques. |
+ void MarkRoots(); |
+ |
+  // Initiate marking of objects reachable from roots. |
+  // Wake up all marker threads and commence marking of objects |
+  // reachable from strong roots. |
+  // When this method returns, the same invariants hold as for the |
+  // MarkRoots() method. |
+ static void MarkRootsPar(); |
+ |
+#ifdef DEBUG |
+  // Atomically increment the count of objects marked in the heap. |
+  // There is no synchronization in the SetMark() method, so this count |
+  // can be greater than the actual number of live objects. |
+ static void IncrementMarkedCount(); |
+#endif |
+ |
+  // Select a random victim marker and, if its deque is not empty, |
+  // try stealing an object from it. |
+  // If stealing succeeds, return the stolen object; otherwise return NULL. |
+ HeapObject* TryStealingObject(); |
+ |
+  // Returns true if there are markers with potentially non-empty |
+  // deques. |
+ INLINE(bool SomebodyHasWork()) { return working_markers_ != 0; } |
+ |
+  // Raise the flag indicating that this marker potentially has a non-empty |
+  // deque. |
+ INLINE(void SignalHasWork()) { |
+ if ((working_markers_ & (1u << id_)) == 0) { |
+ AtomicOr(&working_markers_, 1u << id_); |
+ } |
+ } |
+ |
+  // Clear the flag indicating that this marker potentially has a non-empty |
+  // deque. |
+ INLINE(void SignalNoWork()) { |
+ if ((working_markers_ & (1u << id_)) != 0) { |
+ AtomicAnd(&working_markers_, ~(1u << id_)); |
+ } |
+ } |
+ |
+  // Returns true if the heap potentially contains overflowed objects. |
+ INLINE(bool Overflowed()) { return overflowed_state_ != 0; } |
+ |
+ // Mark all spaces as potentially containing overflowed objects. |
+ INLINE(void SetOverflowedState()) { |
+ static const Atomic32 OVERFLOWED = (1 << (LAST_SPACE + 1)) - 1; |
+ if (overflowed_state_ != OVERFLOWED) { |
+ AtomicOr(&overflowed_state_, OVERFLOWED); |
+ } |
+ } |
+ |
+  // Body of the thread owned by each auxiliary marker. |
+  // Auxiliary markers sleep and wait for the wakeup signal to be raised |
+  // by the main thread. After awakening they proceed to mark objects |
+  // (see the MarkRoots method); when all objects have been marked they go |
+  // back to sleep until the next GC. |
+ virtual void Run(); |
+ |
+  // Prepare markers for marking. Use the memory between low and high as the |
+  // backing store for the markers' deques. |
+ static void Initialize(Address low, Address high); |
+ |
+  // Return the main marker. |
+ static Marker* Sequential() { return markers_[0]; } |
+ |
+ // Return RootMarkingVisitor associated with this marker. |
+ inline ObjectVisitor* root_visitor() { return &root_visitor_; } |
+ |
+  // Mark objects reachable from object groups. |
+  // Parallel marking of object groups is not supported, so |
+  // this method can only be called from the main thread. |
+  // Returns true if any objects were marked. |
+ bool ProcessObjectGroups(); |
+ |
+  // Auxiliary method which can be used to evenly seed the deques of |
+  // sleeping markers. |
+ static void ClaimObjectByOneOfMarkers(HeapObject* object) { |
+ ASSERT(working_markers_ == 0); |
+ markers_[current_marker_]->ClaimObject(object); |
+ current_marker_ = (current_marker_ + 1) % |
+ MarkCompactCollector::NumberOfMarkers(); |
+ } |
+ |
+ private: |
+  // Use the memory region between low and high as a backing store for the |
+  // deque owned by this marker. |
+ void InitializeDeque(Address low, Address high); |
+ |
+  // Use the iterator it to scan for overflowed objects in the heap. |
+  // Try pushing found objects onto the deque, clearing the overflow mark |
+  // when the push succeeds. |
+ template<class T> void ScanOverflowedObjects(T* it); |
+ |
+  // Scan the heap for overflowed objects and push found objects |
+  // onto the deque. |
+  // Different spaces can be scanned by different markers in parallel. |
+  // Might leave some overflowed objects in the heap. |
+ void ScanOverflowedObjects(); |
+ |
+ enum AllocationSpaceScanningState { AVAILABLE, UNDER_SCAN }; |
+ |
+  // Try claiming the allocation space for scanning by this marker. |
+  // On success, clear the flag indicating that this space may contain |
+  // overflowed objects. |
+  // Return true if the space was claimed successfully. |
+ inline bool BeginScanOverflowedIn(AllocationSpace space) { |
+ if (((overflowed_state_ & (1 << space)) != 0) && |
+ AtomicCompareAndSwap(&spaces_state_[space], AVAILABLE, UNDER_SCAN)) { |
+ AtomicAnd(&overflowed_state_, ~(1u << space)); |
+ return true; |
+ } |
+ return false; |
+ } |
+ |
+  // Release the allocation space this marker was scanning. |
+  // If the deque of this marker is full, some overflowed objects might |
+  // remain unscanned in the space. In this case raise the flag |
+  // indicating this and return false. |
+  // Otherwise return true. |
+ inline bool EndScanOverflowedIn(AllocationSpace space) { |
+ bool overflowed = q_.IsFull(); |
+ if (overflowed) AtomicOr(&overflowed_state_, 1u << space); |
+ spaces_state_[space] = AVAILABLE; |
+ return !overflowed; |
+ } |
+ |
+  // Mark the symbol table and all objects reachable from its prefix. |
+ void MarkSymbolTable(); |
+ |
+  // Find all unprocessed object groups containing at least one marked |
+  // object. Mark unmarked objects belonging to these groups and push them |
+  // onto the deque. |
+ bool MarkObjectGroups(); |
+ |
+  // Raise the flag indicating that this marker has left the work-stealing |
+  // loop. It is safe to continue the marking phase (e.g., processing of |
+  // object groups) only after all auxiliary markers have left it. |
+ INLINE(void SignalSleeping()) { |
+ if ((sleeping_markers_ & (1u << id_)) == 0) { |
+ AtomicOr(&sleeping_markers_, 1u << id_); |
+ } |
+ } |
+ |
+ |
+ uint32_t id_; |
+ |
+ VictimSelector victim_; |
+ |
+ RootMarkingVisitor root_visitor_; |
+ |
+  // Semaphore which is used to notify auxiliary markers about the start of |
+  // the marking phase. |
+ Semaphore* wakeup_signal_; |
+ |
+ // Marking deque. Contains grey objects that should be scanned by the marker. |
+ Deque q_; |
+ |
+ // Id of the marker which does not own a thread and performs marking |
+ // from the main thread. |
+ static const uint32_t kMainThreadMarkerId = 0; |
+ |
+ static const int kMaxNumberOfMarkers = 32; |
+ |
+  // Id of the marker that will receive the next object during balanced |
+  // seeding of the deques. See the ClaimObjectByOneOfMarkers method. |
+ static uint32_t current_marker_; |
+ |
+  // Bitvector containing per-space overflow state. |
+  // An allocation space can contain overflowed objects only if the bit |
+  // corresponding to the space is set. The mapping from allocation spaces |
+  // to bits is defined by the AllocationSpace enumeration. |
+ static volatile Atomic32 overflowed_state_; |
+ |
+  // When markers scan the heap for overflowed objects they compete to |
+  // acquire exclusive scanning access to each space. This array is used |
+  // for synchronization. See the BeginScanOverflowedIn/EndScanOverflowedIn |
+  // methods for more details. |
+ static Atomic32 spaces_state_[LAST_SPACE + 1]; |
+ |
+  // Bitvector containing information about working markers. |
+  // If the i-th bit is set then the i-th marker possibly has a non-empty |
+  // deque. |
+ static volatile Atomic32 working_markers_; |
+ |
+  // Bitvector containing information about sleeping markers. |
+  // Used to implement a barrier inside the MarkRootsPar() method. |
+  // If the i-th bit is set then the i-th marker has left the work-stealing |
+  // loop. |
+ static volatile Atomic32 sleeping_markers_; |
+ |
+ static Marker* markers_[kMaxNumberOfMarkers]; |
+}; |
+ |
+ |
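An illustrative aside, not part of the patch: pulling the pieces together, |
this is roughly how MarkLiveObjects() (further below) drives the markers |
during a full GC; some_root is a hypothetical seed object. |
 |
// From space doubles as the backing store for all markers' deques. |
Marker::Initialize(Heap::new_space()->FromSpaceLow(), |
                   Heap::new_space()->FromSpaceHigh()); |
 |
// Seed the deques evenly while no markers are running. |
Marker::ClaimObjectByOneOfMarkers(some_root); |
 |
// Wake the auxiliary markers; all markers then mark in parallel until |
// every deque is empty and no overflowed objects remain in the heap. |
Marker::MarkRootsPar(); |
 |
// Object groups are processed by the main marker alone. |
Marker::Sequential()->ProcessObjectGroups(); |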
+#ifdef DEBUG |
+void Marker::IncrementMarkedCount() { |
+ MarkCompactCollector::tracer()->increment_marked_count(); |
+} |
+#endif |
+ |
+ |
+void Marker::MarkRootsPar() { |
+ // Signal that all markers have work. |
+ const Atomic32 all_markers_mask = |
+ (1 << MarkCompactCollector::NumberOfMarkers()) - 1; |
+ |
+ sleeping_markers_ = 0; |
+ working_markers_ = all_markers_mask; |
+ |
+ // Wake up auxiliary markers. |
+ for (int i = 1; i < MarkCompactCollector::NumberOfMarkers(); i++) { |
+ markers_[i]->wakeup_signal_->Signal(); |
+ } |
+ |
+ Thread::YieldCPU(); |
+ |
+ // Commence marking. |
+ markers_[0]->MarkRoots(); |
+ |
+ // When MarkRoots returns there should be no working markers. |
+ ASSERT(working_markers_ == 0); |
+ |
+  // Wait until all markers actually leave their work-stealing loops. |
+ while (sleeping_markers_ != all_markers_mask) Thread::YieldCPU(); |
+} |
+ |
+ |
+void Marker::Run() { |
+  // The main marker does not own a thread. |
+ ASSERT(id_ != kMainThreadMarkerId); |
+ |
+ while (true) { |
+    // Wait for the wakeup call from the main marker. |
+ wakeup_signal_->Wait(); |
+ |
+ // Commence marking. |
+ MarkRoots(); |
+ |
+ // When MarkRoots returns there should be no working markers. |
+ ASSERT(working_markers_ == 0); |
+ } |
+} |
+ |
+ |
+void Marker::TransitiveClosure() { |
+ while (true) { |
+    // Try to get an object from the local deque. |
+ HeapObject* obj = q_.PopBottom(); |
+ if (obj == NULL) { |
+ ASSERT(q_.IsEmpty()); |
+ return; |
+ } |
+ |
+ // Scan object body. |
+ ProcessObject(obj); |
+ } |
+} |
+ |
+ |
+static int OverflowObjectSize(HeapObject* obj) { |
+ // Recover the normal map pointer, it might be marked as live and |
+ // overflowed. |
+ MapWord map_word = obj->map_word(); |
+ map_word.ClearMark(); |
+ map_word.ClearOverflow(); |
+ return obj->SizeFromMap(map_word.ToMap()); |
+} |
+ |
+ |
+// Fill the deque with overflowed objects returned by the given |
+// iterator. Stop when the deque is filled or the end of the space |
+// is reached, whichever comes first. |
+template<class T> |
+void Marker::ScanOverflowedObjects(T* it) { |
+  // The caller should ensure that the deque is initially not full, |
+  // so that we don't waste effort pointlessly scanning for objects. |
+ ASSERT(!q_.IsFull()); |
+ |
+ for (HeapObject* object = it->next(); object != NULL; object = it->next()) { |
+ if (object->IsOverflowed()) { |
+ object->ClearOverflow(); |
+ ASSERT(object->IsMarked()); |
+ ASSERT(Heap::Contains(object)); |
+ q_.PushBottom(object); |
+ if (q_.IsFull()) return; |
+ } |
+ } |
+} |
+ |
+ |
+void Marker::ScanOverflowedObjects() { |
+ if (BeginScanOverflowedIn(NEW_SPACE)) { |
+ SemiSpaceIterator new_it(Heap::new_space(), &OverflowObjectSize); |
+ ScanOverflowedObjects(&new_it); |
+ if (!EndScanOverflowedIn(NEW_SPACE)) return; |
+ } |
+ |
+ if (BeginScanOverflowedIn(OLD_POINTER_SPACE)) { |
+ HeapObjectIterator old_pointer_it(Heap::old_pointer_space(), |
+ &OverflowObjectSize); |
+ ScanOverflowedObjects(&old_pointer_it); |
+ if (!EndScanOverflowedIn(OLD_POINTER_SPACE)) return; |
+ } |
+ |
+ if (BeginScanOverflowedIn(OLD_DATA_SPACE)) { |
+ HeapObjectIterator old_data_it(Heap::old_data_space(), &OverflowObjectSize); |
+ ScanOverflowedObjects(&old_data_it); |
+ if (!EndScanOverflowedIn(OLD_DATA_SPACE)) return; |
+ } |
+ |
+ if (BeginScanOverflowedIn(CODE_SPACE)) { |
+ HeapObjectIterator code_it(Heap::code_space(), &OverflowObjectSize); |
+ ScanOverflowedObjects(&code_it); |
+ if (!EndScanOverflowedIn(CODE_SPACE)) return; |
+ } |
+ |
+ if (BeginScanOverflowedIn(MAP_SPACE)) { |
+ HeapObjectIterator map_it(Heap::map_space(), &OverflowObjectSize); |
+ ScanOverflowedObjects(&map_it); |
+ if (!EndScanOverflowedIn(MAP_SPACE)) return; |
+ } |
+ |
+ if (BeginScanOverflowedIn(CELL_SPACE)) { |
+ HeapObjectIterator cell_it(Heap::cell_space(), &OverflowObjectSize); |
+ ScanOverflowedObjects(&cell_it); |
+ if (!EndScanOverflowedIn(CELL_SPACE)) return; |
+ } |
+ |
+ if (BeginScanOverflowedIn(LO_SPACE)) { |
+ LargeObjectIterator lo_it(Heap::lo_space(), &OverflowObjectSize); |
+ ScanOverflowedObjects(&lo_it); |
+ if (!EndScanOverflowedIn(LO_SPACE)) return; |
+ } |
+} |
+ |
+ |
+void Marker::ProcessOverflowedObjects() { |
+ ASSERT(q_.IsEmpty()); |
+ |
+ if (Overflowed()) { |
+ // Signal that we might soon have objects in our deque. |
+ SignalHasWork(); |
+ |
+ do { |
+      // Fill the deque with overflowed objects. |
+ ScanOverflowedObjects(); |
+ |
+      // Drain the deque. |
+ TransitiveClosure(); |
+ } while (Overflowed()); |
+ |
+ // Signal that we do not have objects in our deque. |
+ ASSERT(q_.IsEmpty()); |
+ SignalNoWork(); |
+ } |
+} |
+ |
+ |
+void Marker::MarkRoots() { |
+ // Compete with other markers for root groups. |
+ for (int partition = kStrongRootsList; |
+ partition < kNumberOfRootsPartitions; |
+ partition++) { |
+ Heap::ClaimAndIterateStrongRootsGroup( |
+ root_visitor(), |
+ VISIT_ONLY_STRONG, |
+ static_cast<RootsPartition>(partition)); |
+ } |
+ |
+ // Mark SymbolTable and its prefix. |
+ MarkSymbolTable(); |
+ |
+  // Drain the deque. |
+ TransitiveClosure(); |
+ |
+ // Signal that we have an empty deque. |
+ ASSERT(q_.IsEmpty()); |
+ SignalNoWork(); |
+ |
+  // While some marker potentially has a non-empty deque, or there are |
+  // potentially overflowed objects in the heap, keep processing. |
+ while (SomebodyHasWork() || Overflowed()) { |
+    // If there are overflowed objects in the heap, help scan them. |
+ ProcessOverflowedObjects(); |
+ |
+    // If there is no work in the local deque, steal from another thread. |
+ HeapObject* obj = TryStealingObject(); |
+ |
+ if (obj != NULL) { |
+ // Scan object body and transitively mark objects reachable from it. |
+ ProcessObject(obj); |
+ TransitiveClosure(); |
+ ASSERT(q_.IsEmpty()); |
+ } else { |
+ // We failed to steal work. Our deque is empty. |
+ ASSERT(q_.IsEmpty()); |
+ SignalNoWork(); |
+ } |
+ } |
+ |
+ // Check invariants that must hold on the exit from MarkRoots(). |
+ ASSERT(q_.IsEmpty()); |
+ ASSERT(!Overflowed()); |
+ ASSERT(!SomebodyHasWork()); |
+ |
+ SignalSleeping(); |
+} |
+ |
+ |
+HeapObject* Marker::TryStealingObject() { |
+  YieldCPU();  // Give other threads a chance to do something. |
+ |
+ int max_attempts = MarkCompactCollector::NumberOfMarkers() / 2; |
+ for (int attempt = 0; |
+ (attempt < max_attempts) && SomebodyHasWork(); |
+ attempt++) { |
+ Marker* marker = markers_[victim_.Next()]; |
+ if ((marker != this) && !marker->q_.IsEmpty()) { |
+ SignalHasWork(); // We will have work if stealing succeeds. |
+ return marker->q_.PopTop(); |
+ } |
+ } |
+ |
+  return NULL;  // All attempts to steal work failed. |
+} |
+ |
+ |
+void Marker::InitializeDeque(Address low, Address high) { |
+ q_.Initialize(low, high); |
+} |
+ |
+ |
+void Marker::Initialize(Address low, Address high) { |
+ if (markers_[0] == NULL) { // Markers were not created. |
+ markers_[0] = new Marker(0); // Create main marker. |
+ |
+ // Create auxiliary markers and start threads owned by them. |
+ for (int i = 1; i < MarkCompactCollector::NumberOfMarkers(); i++) { |
+ markers_[i] = new Marker(i); |
+ markers_[i]->Start(); |
+ } |
+ } |
+ |
+ // Reset state of root groups to UNCLAIMED. |
+ RootsPartitioner::Reset(); |
+ |
+  // Divide the memory region between low and high evenly among the |
+  // markers. |
+ Address base = low; |
+ intptr_t step = (high - low) / MarkCompactCollector::NumberOfMarkers(); |
+ for (int i = 0; i < MarkCompactCollector::NumberOfMarkers(); i++) { |
+ markers_[i]->InitializeDeque(base, base + step); |
+ base += step; |
+ } |
+ |
+ // Initialize scanning state of spaces. |
+ for (int space = FIRST_SPACE; space <= LAST_SPACE; space++) { |
+ spaces_state_[space] = AVAILABLE; |
+ } |
+} |
+ |
+ |
+volatile Atomic32 Marker::overflowed_state_ = 0; |
+ |
+volatile Atomic32 Marker::working_markers_ = 0; |
+ |
+volatile Atomic32 Marker::sleeping_markers_ = 0; |
+ |
+Marker* Marker::markers_[Marker::kMaxNumberOfMarkers]; |
+ |
+Atomic32 Marker::spaces_state_[LAST_SPACE + 1]; |
static inline HeapObject* ShortCircuitConsString(Object** p) { |
@@ -253,8 +1006,46 @@ static inline HeapObject* ShortCircuitConsString(Object** p) { |
class StaticMarkingVisitor : public StaticVisitorBase { |
public: |
- static inline void IterateBody(Map* map, HeapObject* obj) { |
- table_.GetVisitor(map)(map, obj); |
+ template<typename BodyDescriptor> |
+ class Flexible { |
+ public: |
+ static inline void Visit(Map* map, |
+ HeapObject* object, |
+ Marker* marker) { |
+ int object_size = BodyDescriptor::SizeOf(map, object); |
+ StaticMarkingVisitor::IteratePointers( |
+ object, BodyDescriptor::kStartOffset, object_size, marker); |
+ } |
+ |
+ template<int object_size> |
+ static inline void VisitSpecialized(Map* map, |
+ HeapObject* object, |
+ Marker* marker) { |
+ ASSERT(BodyDescriptor::SizeOf(map, object) == object_size); |
+ StaticMarkingVisitor::IteratePointers( |
+ object, BodyDescriptor::kStartOffset, object_size, marker); |
+ } |
+ }; |
+ |
+ template<typename BodyDescriptor> |
+ class Fixed { |
+ public: |
+ static inline void Visit(Map* map, |
+ HeapObject* object, |
+ Marker* marker) { |
+ StaticMarkingVisitor::IteratePointers(object, |
+ BodyDescriptor::kStartOffset, |
+ BodyDescriptor::kEndOffset, |
+ marker); |
+ } |
+ }; |
+ |
+ |
+ static inline void IterateBody(Map* map, |
+ HeapObject* obj, |
+ Marker* marker) { |
+ ASSERT(map->IsMarked()); |
+ table_.GetVisitor(map)(map, obj, marker); |
} |
static void EnableCodeFlushing(bool enabled) { |
@@ -265,22 +1056,30 @@ class StaticMarkingVisitor : public StaticVisitorBase { |
} |
} |
+ static void SelectMapVisitor() { |
+ if (FLAG_cleanup_caches_in_maps_at_gc && FLAG_collect_maps) { |
+ table_.Register(kVisitMap, &VisitMap<true, true>); |
+ } else if (FLAG_cleanup_caches_in_maps_at_gc && !FLAG_collect_maps) { |
+ table_.Register(kVisitMap, &VisitMap<true, false>); |
+ } else if (!FLAG_cleanup_caches_in_maps_at_gc && FLAG_collect_maps) { |
+ table_.Register(kVisitMap, &VisitMap<false, true>); |
+ } else if (!FLAG_cleanup_caches_in_maps_at_gc && !FLAG_collect_maps) { |
+ table_.Register(kVisitMap, &VisitMap<false, false>); |
+ } else { |
+ UNREACHABLE(); |
+ } |
+ } |
+ |
static void Initialize() { |
table_.Register(kVisitShortcutCandidate, |
- &FixedBodyVisitor<StaticMarkingVisitor, |
- ConsString::BodyDescriptor, |
- void>::Visit); |
+ &Fixed<ConsString::BodyDescriptor>::Visit); |
table_.Register(kVisitConsString, |
- &FixedBodyVisitor<StaticMarkingVisitor, |
- ConsString::BodyDescriptor, |
- void>::Visit); |
+ &Fixed<ConsString::BodyDescriptor>::Visit); |
table_.Register(kVisitFixedArray, |
- &FlexibleBodyVisitor<StaticMarkingVisitor, |
- FixedArray::BodyDescriptor, |
- void>::Visit); |
+ &Flexible<FixedArray::BodyDescriptor>::Visit); |
table_.Register(kVisitSharedFunctionInfo, &VisitSharedFunctionInfo); |
@@ -289,22 +1088,16 @@ class StaticMarkingVisitor : public StaticVisitorBase { |
table_.Register(kVisitSeqTwoByteString, &DataObjectVisitor::Visit); |
table_.Register(kVisitOddball, |
- &FixedBodyVisitor<StaticMarkingVisitor, |
- Oddball::BodyDescriptor, |
- void>::Visit); |
- table_.Register(kVisitMap, |
- &FixedBodyVisitor<StaticMarkingVisitor, |
- Map::BodyDescriptor, |
- void>::Visit); |
+ &Fixed<Oddball::BodyDescriptor>::Visit); |
+ |
+ SelectMapVisitor(); |
table_.Register(kVisitCode, &VisitCode); |
table_.Register(kVisitJSFunction, &VisitJSFunctionAndFlushCode); |
table_.Register(kVisitPropertyCell, |
- &FixedBodyVisitor<StaticMarkingVisitor, |
- JSGlobalPropertyCell::BodyDescriptor, |
- void>::Visit); |
+ &Fixed<JSGlobalPropertyCell::BodyDescriptor>::Visit); |
table_.RegisterSpecializations<DataObjectVisitor, |
kVisitDataObject, |
@@ -319,21 +1112,23 @@ class StaticMarkingVisitor : public StaticVisitorBase { |
kVisitStructGeneric>(); |
} |
- INLINE(static void VisitPointer(Object** p)) { |
- MarkObjectByPointer(p); |
+ INLINE(static void VisitPointer(Object** p, Marker* marker)) { |
+ MarkObjectByPointer(p, marker); |
} |
- INLINE(static void VisitPointers(Object** start, Object** end)) { |
+ INLINE(static void VisitPointers(Object** start, |
+ Object** end, |
+ Marker* marker)) { |
// Mark all objects pointed to in [start, end). |
const int kMinRangeForMarkingRecursion = 64; |
if (end - start >= kMinRangeForMarkingRecursion) { |
- if (VisitUnmarkedObjects(start, end)) return; |
+ if (VisitUnmarkedObjects(start, end, marker)) return; |
// We are close to a stack overflow, so just mark the objects. |
} |
- for (Object** p = start; p < end; p++) MarkObjectByPointer(p); |
+ for (Object** p = start; p < end; p++) MarkObjectByPointer(p, marker); |
} |
- static inline void VisitCodeTarget(RelocInfo* rinfo) { |
+ static inline void VisitCodeTarget(RelocInfo* rinfo, Marker* marker) { |
ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode())); |
Code* code = Code::GetCodeFromTargetAddress(rinfo->target_address()); |
if (FLAG_cleanup_ics_at_gc && code->is_inline_cache_stub()) { |
@@ -341,42 +1136,46 @@ class StaticMarkingVisitor : public StaticVisitorBase { |
// Please note targets for cleared inline cached do not have to be |
// marked since they are contained in Heap::non_monomorphic_cache(). |
} else { |
- MarkCompactCollector::MarkObject(code); |
+ marker->ClaimObject(code); |
} |
} |
- static inline void VisitDebugTarget(RelocInfo* rinfo) { |
+ static inline void VisitDebugTarget(RelocInfo* rinfo, Marker* marker) { |
ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) && |
rinfo->IsPatchedReturnSequence()) || |
(RelocInfo::IsDebugBreakSlot(rinfo->rmode()) && |
rinfo->IsPatchedDebugBreakSlotSequence())); |
HeapObject* code = Code::GetCodeFromTargetAddress(rinfo->call_address()); |
- MarkCompactCollector::MarkObject(code); |
+ marker->ClaimObject(code); |
} |
// Mark object pointed to by p. |
- INLINE(static void MarkObjectByPointer(Object** p)) { |
+ INLINE(static void MarkObjectByPointer(Object** p, Marker* marker)) { |
if (!(*p)->IsHeapObject()) return; |
HeapObject* object = ShortCircuitConsString(p); |
- MarkCompactCollector::MarkObject(object); |
+ marker->ClaimObject(object); |
} |
// Visit an unmarked object. |
- static inline void VisitUnmarkedObject(HeapObject* obj) { |
+ static inline void VisitUnmarkedObject(HeapObject* obj, |
+ Marker* marker) { |
#ifdef DEBUG |
ASSERT(Heap::Contains(obj)); |
- ASSERT(!obj->IsMarked()); |
#endif |
Map* map = obj->map(); |
- MarkCompactCollector::SetMark(obj); |
- // Mark the map pointer and the body. |
- MarkCompactCollector::MarkObject(map); |
- IterateBody(map, obj); |
+ if (marker->SetMark(obj)) { |
+ // Mark the map pointer and the body. |
+ ASSERT(map->IsHeapObject()); |
+ marker->ClaimObject(map); |
+ IterateBody(map, obj, marker); |
+ } |
} |
// Visit all unmarked objects pointed to by [start, end). |
// Returns false if the operation fails (lack of stack space). |
- static inline bool VisitUnmarkedObjects(Object** start, Object** end) { |
+ static inline bool VisitUnmarkedObjects(Object** start, |
+ Object** end, |
+ Marker* marker) { |
// Return false is we are close to the stack limit. |
StackLimitCheck check; |
if (check.HasOverflowed()) return false; |
@@ -386,35 +1185,34 @@ class StaticMarkingVisitor : public StaticVisitorBase { |
if (!(*p)->IsHeapObject()) continue; |
HeapObject* obj = HeapObject::cast(*p); |
if (obj->IsMarked()) continue; |
- VisitUnmarkedObject(obj); |
+ VisitUnmarkedObject(obj, marker); |
} |
return true; |
} |
- static inline void VisitExternalReference(Address* p) { } |
- static inline void VisitRuntimeEntry(RelocInfo* rinfo) { } |
+ static inline void VisitExternalReference(Address* p, Marker* marker) { } |
+ static inline void VisitRuntimeEntry(RelocInfo* rinfo, Marker* marker) { } |
private: |
class DataObjectVisitor { |
public: |
template<int size> |
- static void VisitSpecialized(Map* map, HeapObject* object) { |
+ static void VisitSpecialized(Map* map, |
+ HeapObject* object, |
+ Marker* marker) { |
} |
- static void Visit(Map* map, HeapObject* object) { |
+ static void Visit(Map* map, HeapObject* object, Marker* marker) { |
} |
}; |
- typedef FlexibleBodyVisitor<StaticMarkingVisitor, |
- JSObject::BodyDescriptor, |
- void> JSObjectVisitor; |
+ typedef Flexible<JSObject::BodyDescriptor> JSObjectVisitor; |
- typedef FlexibleBodyVisitor<StaticMarkingVisitor, |
- StructBodyDescriptor, |
- void> StructObjectVisitor; |
+ typedef Flexible<StructBodyDescriptor> StructObjectVisitor; |
- static void VisitCode(Map* map, HeapObject* object) { |
- reinterpret_cast<Code*>(object)->CodeIterateBody<StaticMarkingVisitor>(); |
+ static void VisitCode(Map* map, HeapObject* object, Marker* marker) { |
+ reinterpret_cast<Code*>(object)-> |
+ CodeIterateBody<StaticMarkingVisitor, Marker*>(marker); |
} |
// Code flushing support. |
@@ -460,7 +1258,7 @@ class StaticMarkingVisitor : public StaticVisitorBase { |
return; |
} |
- // Code is either on stack or in compilation cache. |
+  // Code is either on the stack, in the compilation cache, or not flushable. |
if (shared_info->unchecked_code()->IsMarked()) { |
shared_info->set_code_age(0); |
return; |
@@ -468,7 +1266,7 @@ class StaticMarkingVisitor : public StaticVisitorBase { |
// The function must be compiled and have the source code available, |
// to be able to recompile it in case we need the function again. |
- if (!(shared_info->is_compiled() && HasSourceCode(shared_info))) return; |
+ if (!(IsCompiled(shared_info) && HasSourceCode(shared_info))) return; |
// We never flush code for Api functions. |
Object* function_data = shared_info->function_data(); |
@@ -479,7 +1277,7 @@ class StaticMarkingVisitor : public StaticVisitorBase { |
} |
// Only flush code for functions. |
- if (shared_info->code()->kind() != Code::FUNCTION) return; |
+ if (shared_info->unchecked_code()->kind() != Code::FUNCTION) return; |
// Function must be lazy compilable. |
if (!shared_info->allows_lazy_compilation()) return; |
@@ -534,21 +1332,21 @@ class StaticMarkingVisitor : public StaticVisitorBase { |
} |
- static void VisitSharedFunctionInfo(Map* map, HeapObject* object) { |
+ static void VisitSharedFunctionInfo(Map* map, |
+ HeapObject* object, |
+ Marker* marker) { |
SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(object); |
if (shared->IsInobjectSlackTrackingInProgress()) { |
shared->DetachInitialMap(); |
} |
- FixedBodyVisitor<StaticMarkingVisitor, |
- SharedFunctionInfo::BodyDescriptor, |
- void>::Visit(map, object); |
+ Fixed<SharedFunctionInfo::BodyDescriptor>::Visit(map, object, marker); |
} |
- static void VisitCodeEntry(Address entry_address) { |
+ static void VisitCodeEntry(Address entry_address, Marker* marker) { |
Object* code = Code::GetObjectFromEntryAddress(entry_address); |
Object* old_code = code; |
- VisitPointer(&code); |
+ VisitPointer(&code, marker); |
if (code != old_code) { |
Memory::Address_at(entry_address) = |
reinterpret_cast<Code*>(code)->entry(); |
@@ -556,33 +1354,64 @@ class StaticMarkingVisitor : public StaticVisitorBase { |
} |
- static void VisitJSFunctionAndFlushCode(Map* map, HeapObject* object) { |
+ static void VisitJSFunctionAndFlushCode(Map* map, |
+ HeapObject* object, |
+ Marker* marker) { |
JSFunction* jsfunction = reinterpret_cast<JSFunction*>(object); |
// The function must have a valid context and not be a builtin. |
if (IsValidNotBuiltinContext(jsfunction->unchecked_context())) { |
FlushCodeForFunction(jsfunction); |
} |
- VisitJSFunction(map, object); |
+ VisitJSFunction(map, object, marker); |
} |
- static void VisitJSFunction(Map* map, HeapObject* object) { |
-#define SLOT_ADDR(obj, offset) \ |
+ static void VisitJSFunction(Map* map, HeapObject* object, Marker* marker) { |
+#define SLOT_ADDR(obj, offset) \ |
reinterpret_cast<Object**>((obj)->address() + offset) |
VisitPointers(SLOT_ADDR(object, JSFunction::kPropertiesOffset), |
- SLOT_ADDR(object, JSFunction::kCodeEntryOffset)); |
+ SLOT_ADDR(object, JSFunction::kCodeEntryOffset), |
+ marker); |
- VisitCodeEntry(object->address() + JSFunction::kCodeEntryOffset); |
+ VisitCodeEntry(object->address() + JSFunction::kCodeEntryOffset, marker); |
VisitPointers(SLOT_ADDR(object, |
JSFunction::kCodeEntryOffset + kPointerSize), |
- SLOT_ADDR(object, JSFunction::kSize)); |
+ SLOT_ADDR(object, JSFunction::kSize), |
+ marker); |
#undef SLOT_ADDR |
} |
+ template<bool cleanup_caches_in_maps_at_gc, bool collect_maps> |
+ static void VisitMap(Map* meta_map, HeapObject* object, Marker* marker) { |
+ Map* map = reinterpret_cast<Map*>(object); |
- typedef void (*Callback)(Map* map, HeapObject* object); |
+ if (cleanup_caches_in_maps_at_gc) map->ClearCodeCache(); |
+ |
+ if (collect_maps && |
+ map->instance_type() >= FIRST_JS_OBJECT_TYPE && |
+ map->instance_type() <= JS_FUNCTION_TYPE) { |
+ MarkCompactCollector::MarkMapContents(map, marker); |
+ } |
+ |
+ Fixed<Map::BodyDescriptor>::Visit(map, object, marker); |
+ } |
+ |
+ INLINE(static void IteratePointers(HeapObject* object, |
+ int start_offset, |
+ int end_offset, |
+ Marker* marker)) { |
+ Object** start_slot = reinterpret_cast<Object**>(object->address() + |
+ start_offset); |
+ Object** end_slot = reinterpret_cast<Object**>(object->address() + |
+ end_offset); |
+ VisitPointers(start_slot, end_slot, marker); |
+ } |
+ |
+ typedef void (*Callback)(Map* map, |
+ HeapObject* object, |
+ Marker* marker); |
static VisitorDispatchTable<Callback> table_; |
}; |
@@ -592,31 +1421,65 @@ VisitorDispatchTable<StaticMarkingVisitor::Callback> |
StaticMarkingVisitor::table_; |
+void Marker::ProcessObject(HeapObject* object) { |
+ ASSERT(object->IsHeapObject()); |
+ ASSERT(Heap::Contains(object)); |
+ ASSERT(object->IsMarked()); |
+ ASSERT(!object->IsOverflowed()); |
+ |
+ // Because the object is marked, we have to recover the original map |
+ // pointer and use it to mark the object's body. |
+ MapWord map_word = object->map_word(); |
+ map_word.ClearMark(); |
+ Map* map = map_word.ToMap(); |
+ ClaimObject(map); |
+ StaticMarkingVisitor::IterateBody(map, object, this); |
+} |
+ |
+ |
class MarkingVisitor : public ObjectVisitor { |
public: |
+ explicit MarkingVisitor(Marker* marker) : marker_(marker) {} |
+ |
void VisitPointer(Object** p) { |
- StaticMarkingVisitor::VisitPointer(p); |
+ StaticMarkingVisitor::VisitPointer(p, marker_); |
} |
void VisitPointers(Object** start, Object** end) { |
- StaticMarkingVisitor::VisitPointers(start, end); |
+ StaticMarkingVisitor::VisitPointers(start, end, marker_); |
} |
void VisitCodeTarget(RelocInfo* rinfo) { |
- StaticMarkingVisitor::VisitCodeTarget(rinfo); |
+ StaticMarkingVisitor::VisitCodeTarget(rinfo, marker_); |
} |
void VisitDebugTarget(RelocInfo* rinfo) { |
- StaticMarkingVisitor::VisitDebugTarget(rinfo); |
+ StaticMarkingVisitor::VisitDebugTarget(rinfo, marker_); |
} |
+ private: |
+ Marker* marker_; |
}; |
+void Marker::MarkSymbolTable() { |
+ SymbolTable* symbol_table = Heap::raw_unchecked_symbol_table(); |
+ |
+ // Mark the symbol table itself. |
+ if (SetMarkExclusively(symbol_table)) { |
+ // Explicitly mark the prefix. |
+ MarkingVisitor marking_visitor(this); |
+ symbol_table->IteratePrefix(&marking_visitor); |
+ } |
+ |
+ ASSERT(symbol_table->IsMarked()); |
+} |
+ |
+ |
class CodeMarkingVisitor : public ThreadVisitor { |
public: |
void VisitThread(ThreadLocalTop* top) { |
for (StackFrameIterator it(top); !it.done(); it.Advance()) { |
- MarkCompactCollector::MarkObject(it.frame()->unchecked_code()); |
+ Marker::ClaimObjectByOneOfMarkers(it.frame()->unchecked_code()); |
} |
} |
}; |
@@ -631,7 +1494,7 @@ class SharedFunctionInfoMarkingVisitor : public ObjectVisitor { |
void VisitPointer(Object** slot) { |
Object* obj = *slot; |
if (obj->IsHeapObject()) { |
- MarkCompactCollector::MarkObject(HeapObject::cast(obj)); |
+ Marker::ClaimObjectByOneOfMarkers(HeapObject::cast(obj)); |
} |
} |
}; |
@@ -651,13 +1514,9 @@ void MarkCompactCollector::PrepareForCodeFlushing() { |
#endif |
StaticMarkingVisitor::EnableCodeFlushing(true); |
- // Ensure that empty descriptor array is marked. Method MarkDescriptorArray |
- // relies on it being marked before any other descriptor array. |
- MarkObject(Heap::raw_unchecked_empty_descriptor_array()); |
- |
// Make sure we are not referencing the code from the stack. |
for (StackFrameIterator it; !it.done(); it.Advance()) { |
- MarkObject(it.frame()->unchecked_code()); |
+ Marker::ClaimObjectByOneOfMarkers(it.frame()->unchecked_code()); |
} |
// Iterate the archived stacks in all threads to check if |
@@ -667,43 +1526,32 @@ void MarkCompactCollector::PrepareForCodeFlushing() { |
SharedFunctionInfoMarkingVisitor visitor; |
CompilationCache::IterateFunctions(&visitor); |
- |
- ProcessMarkingStack(); |
} |
+void RootMarkingVisitor::MarkObjectByPointer(Object** p) { |
+ if (!(*p)->IsHeapObject()) return; |
-// Visitor class for marking heap roots. |
-class RootMarkingVisitor : public ObjectVisitor { |
- public: |
- void VisitPointer(Object** p) { |
- MarkObjectByPointer(p); |
- } |
+ // Replace flat cons strings in place. |
+ HeapObject* object = ShortCircuitConsString(p); |
- void VisitPointers(Object** start, Object** end) { |
- for (Object** p = start; p < end; p++) MarkObjectByPointer(p); |
+ Map* map = object->map(); |
+ // Mark the object. |
+ if (marker_->SetMark(object)) { |
+ ASSERT(map->IsHeapObject()); |
+ marker_->ClaimObject(map); |
+ StaticMarkingVisitor::IterateBody(map, object, marker_); |
+ marker_->TransitiveClosure(); |
} |
+} |
- private: |
- void MarkObjectByPointer(Object** p) { |
- if (!(*p)->IsHeapObject()) return; |
- |
- // Replace flat cons strings in place. |
- HeapObject* object = ShortCircuitConsString(p); |
- if (object->IsMarked()) return; |
- |
- Map* map = object->map(); |
- // Mark the object. |
- MarkCompactCollector::SetMark(object); |
- |
- // Mark the map pointer and body, and push them on the marking stack. |
- MarkCompactCollector::MarkObject(map); |
- StaticMarkingVisitor::IterateBody(map, object); |
+uint32_t Marker::current_marker_ = 0; |
- // Mark all the objects reachable from the map and body. May leave |
- // overflowed objects in the heap. |
- MarkCompactCollector::EmptyMarkingStack(); |
- } |
-}; |
+Marker::Marker(int id) |
+ : id_(id), |
+ victim_(id, MarkCompactCollector::NumberOfMarkers()), |
+ root_visitor_(this) { |
+ wakeup_signal_ = OS::CreateSemaphore(0); |
+} |
// Helper class for pruning the symbol table. |
@@ -738,32 +1586,9 @@ class SymbolTableCleaner : public ObjectVisitor { |
}; |
-void MarkCompactCollector::MarkUnmarkedObject(HeapObject* object) { |
- ASSERT(!object->IsMarked()); |
- ASSERT(Heap::Contains(object)); |
- if (object->IsMap()) { |
- Map* map = Map::cast(object); |
- if (FLAG_cleanup_caches_in_maps_at_gc) { |
- map->ClearCodeCache(); |
- } |
- SetMark(map); |
- if (FLAG_collect_maps && |
- map->instance_type() >= FIRST_JS_OBJECT_TYPE && |
- map->instance_type() <= JS_FUNCTION_TYPE) { |
- MarkMapContents(map); |
- } else { |
- marking_stack.Push(map); |
- } |
- } else { |
- SetMark(object); |
- marking_stack.Push(object); |
- } |
-} |
- |
- |
-void MarkCompactCollector::MarkMapContents(Map* map) { |
+void MarkCompactCollector::MarkMapContents(Map* map, Marker* marker) { |
MarkDescriptorArray(reinterpret_cast<DescriptorArray*>( |
- *HeapObject::RawField(map, Map::kInstanceDescriptorsOffset))); |
+ *HeapObject::RawField(map, Map::kInstanceDescriptorsOffset)), marker); |
// Mark the Object* fields of the Map. |
// Since the descriptor array has been marked already, it is fine |
@@ -773,24 +1598,28 @@ void MarkCompactCollector::MarkMapContents(Map* map) { |
Object** end_slot = HeapObject::RawField(map, Map::kPointerFieldsEndOffset); |
- StaticMarkingVisitor::VisitPointers(start_slot, end_slot); |
+ StaticMarkingVisitor::VisitPointers(start_slot, end_slot, marker); |
} |
-void MarkCompactCollector::MarkDescriptorArray( |
- DescriptorArray* descriptors) { |
+void MarkCompactCollector::MarkDescriptorArray(DescriptorArray* descriptors, |
+ Marker* marker) { |
if (descriptors->IsMarked()) return; |
// Empty descriptor array is marked as a root before any maps are marked. |
ASSERT(descriptors != Heap::raw_unchecked_empty_descriptor_array()); |
- SetMark(descriptors); |
+ |
+ if (!marker->SetMark(descriptors)) return; |
FixedArray* contents = reinterpret_cast<FixedArray*>( |
descriptors->get(DescriptorArray::kContentArrayIndex)); |
+ |
+ MARKING_INVARIANT_ST(!contents->IsMarked() && contents->IsFixedArray()); |
+ |
+ if (!marker->SetMark(contents)) return; |
+ |
ASSERT(contents->IsHeapObject()); |
- ASSERT(!contents->IsMarked()); |
- ASSERT(contents->IsFixedArray()); |
ASSERT(contents->length() >= 2); |
- SetMark(contents); |
+ |
// Contents contains (value, details) pairs. If the details say that |
// the type of descriptor is MAP_TRANSITION, CONSTANT_TRANSITION, or |
// NULL_DESCRIPTOR, we don't mark the value as live. Only for |
@@ -802,15 +1631,12 @@ void MarkCompactCollector::MarkDescriptorArray( |
PropertyDetails details(Smi::cast(contents->get(i + 1))); |
if (details.type() < FIRST_PHANTOM_PROPERTY_TYPE) { |
HeapObject* object = reinterpret_cast<HeapObject*>(contents->get(i)); |
- if (object->IsHeapObject() && !object->IsMarked()) { |
- SetMark(object); |
- marking_stack.Push(object); |
- } |
+ if (object->IsHeapObject()) marker->ClaimObject(object); |
} |
} |
// The DescriptorArray descriptors contains a pointer to its contents array, |
// but the contents array is already marked. |
- marking_stack.Push(descriptors); |
+ marker->Push(descriptors); |
} |
@@ -831,73 +1657,25 @@ void MarkCompactCollector::CreateBackPointers() { |
} |
-static int OverflowObjectSize(HeapObject* obj) { |
- // Recover the normal map pointer, it might be marked as live and |
- // overflowed. |
- MapWord map_word = obj->map_word(); |
- map_word.ClearMark(); |
- map_word.ClearOverflow(); |
- return obj->SizeFromMap(map_word.ToMap()); |
-} |
- |
- |
-// Fill the marking stack with overflowed objects returned by the given |
-// iterator. Stop when the marking stack is filled or the end of the space |
-// is reached, whichever comes first. |
-template<class T> |
-static void ScanOverflowedObjects(T* it) { |
- // The caller should ensure that the marking stack is initially not full, |
- // so that we don't waste effort pointlessly scanning for objects. |
- ASSERT(!marking_stack.is_full()); |
- |
- for (HeapObject* object = it->next(); object != NULL; object = it->next()) { |
- if (object->IsOverflowed()) { |
- object->ClearOverflow(); |
- ASSERT(object->IsMarked()); |
- ASSERT(Heap::Contains(object)); |
- marking_stack.Push(object); |
- if (marking_stack.is_full()) return; |
- } |
- } |
-} |
- |
- |
bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) { |
return (*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked(); |
} |
-void MarkCompactCollector::MarkSymbolTable() { |
- SymbolTable* symbol_table = Heap::raw_unchecked_symbol_table(); |
- // Mark the symbol table itself. |
- SetMark(symbol_table); |
- // Explicitly mark the prefix. |
- MarkingVisitor marker; |
- symbol_table->IteratePrefix(&marker); |
- ProcessMarkingStack(); |
-} |
- |
- |
-void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) { |
+void MarkCompactCollector::MarkRoots() { |
// Mark the heap roots including global variables, stack variables, |
// etc., and all objects reachable from them. |
- Heap::IterateStrongRoots(visitor, VISIT_ONLY_STRONG); |
- |
- // Handle the symbol table specially. |
- MarkSymbolTable(); |
- |
- // There may be overflowed objects in the heap. Visit them now. |
- while (marking_stack.overflowed()) { |
- RefillMarkingStack(); |
- EmptyMarkingStack(); |
- } |
+ Marker::MarkRootsPar(); |
} |
-void MarkCompactCollector::MarkObjectGroups() { |
+ |
+bool Marker::MarkObjectGroups() { |
List<ObjectGroup*>* object_groups = GlobalHandles::ObjectGroups(); |
- for (int i = 0; i < object_groups->length(); i++) { |
+ bool marked = false; |
+ |
+ for (int i = id_; i < object_groups->length(); i++) { |
ObjectGroup* entry = object_groups->at(i); |
if (entry == NULL) continue; |
@@ -916,104 +1694,35 @@ void MarkCompactCollector::MarkObjectGroups() { |
// An object in the group is marked, so mark as gray all white heap |
// objects in the group. |
for (int j = 0; j < objects.length(); ++j) { |
- if ((*objects[j])->IsHeapObject()) { |
- MarkObject(HeapObject::cast(*objects[j])); |
+ HeapObject* object = reinterpret_cast<HeapObject*>(*objects[j]); |
+ if (object->IsHeapObject()) { |
+ marked = marked || !object->IsMarked(); |
+ ClaimObject(HeapObject::cast(*objects[j])); |
} |
} |
+ |
// Once the entire group has been colored gray, set the object group |
// to NULL so it won't be processed again. |
delete object_groups->at(i); |
object_groups->at(i) = NULL; |
} |
-} |
- |
- |
-// Mark all objects reachable from the objects on the marking stack. |
-// Before: the marking stack contains zero or more heap object pointers. |
-// After: the marking stack is empty, and all objects reachable from the |
-// marking stack have been marked, or are overflowed in the heap. |
-void MarkCompactCollector::EmptyMarkingStack() { |
- while (!marking_stack.is_empty()) { |
- HeapObject* object = marking_stack.Pop(); |
- ASSERT(object->IsHeapObject()); |
- ASSERT(Heap::Contains(object)); |
- ASSERT(object->IsMarked()); |
- ASSERT(!object->IsOverflowed()); |
- |
- // Because the object is marked, we have to recover the original map |
- // pointer and use it to mark the object's body. |
- MapWord map_word = object->map_word(); |
- map_word.ClearMark(); |
- Map* map = map_word.ToMap(); |
- MarkObject(map); |
- StaticMarkingVisitor::IterateBody(map, object); |
- } |
+ return marked; |
} |
-// Sweep the heap for overflowed objects, clear their overflow bits, and |
-// push them on the marking stack. Stop early if the marking stack fills |
-// before sweeping completes. If sweeping completes, there are no remaining |
-// overflowed objects in the heap so the overflow flag on the markings stack |
-// is cleared. |
-void MarkCompactCollector::RefillMarkingStack() { |
- ASSERT(marking_stack.overflowed()); |
- |
- SemiSpaceIterator new_it(Heap::new_space(), &OverflowObjectSize); |
- ScanOverflowedObjects(&new_it); |
- if (marking_stack.is_full()) return; |
- |
- HeapObjectIterator old_pointer_it(Heap::old_pointer_space(), |
- &OverflowObjectSize); |
- ScanOverflowedObjects(&old_pointer_it); |
- if (marking_stack.is_full()) return; |
- |
- HeapObjectIterator old_data_it(Heap::old_data_space(), &OverflowObjectSize); |
- ScanOverflowedObjects(&old_data_it); |
- if (marking_stack.is_full()) return; |
- |
- HeapObjectIterator code_it(Heap::code_space(), &OverflowObjectSize); |
- ScanOverflowedObjects(&code_it); |
- if (marking_stack.is_full()) return; |
- |
- HeapObjectIterator map_it(Heap::map_space(), &OverflowObjectSize); |
- ScanOverflowedObjects(&map_it); |
- if (marking_stack.is_full()) return; |
+bool Marker::ProcessObjectGroups() { |
+ ASSERT(working_markers_ == 0); |
+ ASSERT(id_ == kMainThreadMarkerId); |
- HeapObjectIterator cell_it(Heap::cell_space(), &OverflowObjectSize); |
- ScanOverflowedObjects(&cell_it); |
- if (marking_stack.is_full()) return; |
- |
- LargeObjectIterator lo_it(Heap::lo_space(), &OverflowObjectSize); |
- ScanOverflowedObjects(&lo_it); |
- if (marking_stack.is_full()) return; |
- |
- marking_stack.clear_overflowed(); |
-} |
- |
- |
-// Mark all objects reachable (transitively) from objects on the marking |
-// stack. Before: the marking stack contains zero or more heap object |
-// pointers. After: the marking stack is empty and there are no overflowed |
-// objects in the heap. |
-void MarkCompactCollector::ProcessMarkingStack() { |
- EmptyMarkingStack(); |
- while (marking_stack.overflowed()) { |
- RefillMarkingStack(); |
- EmptyMarkingStack(); |
+ bool found_unprocessed_group = false; |
+ while (MarkObjectGroups()) { |
+ TransitiveClosure(); |
+ ProcessOverflowedObjects(); |
+ found_unprocessed_group = true; |
} |
-} |
- |
-void MarkCompactCollector::ProcessObjectGroups() { |
- bool work_to_do = true; |
- ASSERT(marking_stack.is_empty()); |
- while (work_to_do) { |
- MarkObjectGroups(); |
- work_to_do = !marking_stack.is_empty(); |
- ProcessMarkingStack(); |
- } |
+ return found_unprocessed_group; |
} |
@@ -1025,21 +1734,20 @@ void MarkCompactCollector::MarkLiveObjects() { |
#endif |
// The to space contains live objects, the from space is used as a marking |
// stack. |
- marking_stack.Initialize(Heap::new_space()->FromSpaceLow(), |
- Heap::new_space()->FromSpaceHigh()); |
+ Marker::Initialize(Heap::new_space()->FromSpaceLow(), |
+ Heap::new_space()->FromSpaceHigh()); |
+ |
+  // Ensure that the empty descriptor array is marked. The method |
+  // MarkDescriptorArray relies on it being marked before any other |
+  // descriptor array. |
+  Marker::ClaimObjectByOneOfMarkers( |
+      Heap::raw_unchecked_empty_descriptor_array()); |
- ASSERT(!marking_stack.overflowed()); |
+ StaticMarkingVisitor::SelectMapVisitor(); |
PrepareForCodeFlushing(); |
- RootMarkingVisitor root_visitor; |
- MarkRoots(&root_visitor); |
+ MarkRoots(); |
- // The objects reachable from the roots are marked, yet unreachable |
- // objects are unmarked. Mark objects reachable from object groups |
- // containing at least one marked object, and continue until no new |
- // objects are reachable from the object groups. |
- ProcessObjectGroups(); |
+ Marker::Sequential()->ProcessObjectGroups(); |
// The objects reachable from the roots or object groups are marked, |
// yet unreachable objects are unmarked. Mark objects reachable |
@@ -1049,15 +1757,12 @@ void MarkCompactCollector::MarkLiveObjects() { |
// destruction. |
GlobalHandles::IdentifyWeakHandles(&IsUnmarkedHeapObject); |
// Then we mark the objects and process the transitive closure. |
- GlobalHandles::IterateWeakRoots(&root_visitor); |
- while (marking_stack.overflowed()) { |
- RefillMarkingStack(); |
- EmptyMarkingStack(); |
- } |
+ GlobalHandles::IterateWeakRoots(Marker::Sequential()->root_visitor()); |
+ Marker::Sequential()->ProcessOverflowedObjects(); |
// Repeat the object groups to mark unmarked groups reachable from the |
// weak roots. |
- ProcessObjectGroups(); |
+ Marker::Sequential()->ProcessObjectGroups(); |
// Prune the symbol table removing all symbols only pointed to by the |
// symbol table. Cannot use symbol_table() here because the symbol |
@@ -1082,24 +1787,31 @@ static int CountMarkedCallback(HeapObject* obj) { |
#ifdef DEBUG |
+static int SafeSizeOf(HeapObject* obj) { |
+ MapWord map_word = obj->map_word(); |
+ map_word.ClearMark(); |
+ map_word.ClearOverflow(); |
+ return obj->SizeFromMap(map_word.ToMap()); |
+} |
+ |
void MarkCompactCollector::UpdateLiveObjectCount(HeapObject* obj) { |
- live_bytes_ += obj->Size(); |
+ const int size = SafeSizeOf(obj); |
+ AtomicAdd(&live_bytes_, size); |
if (Heap::new_space()->Contains(obj)) { |
- live_young_objects_size_ += obj->Size(); |
+ AtomicAdd(&live_young_objects_size_, size); |
} else if (Heap::map_space()->Contains(obj)) { |
- ASSERT(obj->IsMap()); |
- live_map_objects_size_ += obj->Size(); |
+ ASSERT(SafeIsMap(obj)); |
+ AtomicAdd(&live_map_objects_size_, size); |
} else if (Heap::cell_space()->Contains(obj)) { |
- ASSERT(obj->IsJSGlobalPropertyCell()); |
- live_cell_objects_size_ += obj->Size(); |
+ AtomicAdd(&live_cell_objects_size_, size); |
} else if (Heap::old_pointer_space()->Contains(obj)) { |
- live_old_pointer_objects_size_ += obj->Size(); |
+ AtomicAdd(&live_old_pointer_objects_size_, size); |
} else if (Heap::old_data_space()->Contains(obj)) { |
- live_old_data_objects_size_ += obj->Size(); |
+ AtomicAdd(&live_old_data_objects_size_, size); |
} else if (Heap::code_space()->Contains(obj)) { |
- live_code_objects_size_ += obj->Size(); |
+ AtomicAdd(&live_code_objects_size_, size); |
} else if (Heap::lo_space()->Contains(obj)) { |
- live_lo_objects_size_ += obj->Size(); |
+ AtomicAdd(&live_lo_objects_size_, size); |
} else { |
UNREACHABLE(); |
} |
@@ -1122,6 +1834,7 @@ void MarkCompactCollector::SweepLargeObjectSpace() { |
bool MarkCompactCollector::SafeIsMap(HeapObject* object) { |
MapWord metamap = object->map_word(); |
metamap.ClearMark(); |
+ metamap.ClearOverflow(); |
return metamap.ToMap()->instance_type() == MAP_TYPE; |
} |
@@ -1352,7 +2065,9 @@ inline void EncodeForwardingAddressesInRange(Address start, |
HeapObject* object = HeapObject::FromAddress(current); |
if (object->IsMarked()) { |
object->ClearMark(); |
+#ifdef DEBUG |
MarkCompactCollector::tracer()->decrement_marked_count(); |
+#endif |
object_size = object->Size(); |
Object* forwarded = Alloc(object, object_size); |
@@ -1577,7 +2292,11 @@ static void SweepNewSpace(NewSpace* space) { |
if (object->IsMarked()) { |
object->ClearMark(); |
+#ifdef DEBUG |
MarkCompactCollector::tracer()->decrement_marked_count(); |
+#endif |
+ |
+ ASSERT(object->map()->IsMarked()); |
size = object->Size(); |
survivors_size += size; |
@@ -1687,8 +2406,9 @@ static void SweepSpace(PagedSpace* space) { |
object = HeapObject::FromAddress(current); |
if (object->IsMarked()) { |
object->ClearMark(); |
+#ifdef DEBUG |
MarkCompactCollector::tracer()->decrement_marked_count(); |
- |
+#endif |
if (!is_previous_alive) { // Transition from free to live. |
space->DeallocateBlock(free_start, |
static_cast<int>(current - free_start), |
@@ -2072,7 +2792,7 @@ void MarkCompactCollector::SweepSpaces() { |
intptr_t live_maps_size = Heap::map_space()->Size(); |
int live_maps = static_cast<int>(live_maps_size / Map::kSize); |
- ASSERT(live_map_objects_size_ == live_maps_size); |
+ MARKING_INVARIANT_ST(live_map_objects_size_ == live_maps_size); |
if (Heap::map_space()->NeedsCompaction(live_maps)) { |
MapCompact map_compact(live_maps); |
@@ -2269,12 +2989,13 @@ void MarkCompactCollector::UpdatePointers() { |
USE(live_codes_size); |
USE(live_cells_size); |
USE(live_news_size); |
- ASSERT(live_maps_size == live_map_objects_size_); |
- ASSERT(live_data_olds_size == live_old_data_objects_size_); |
- ASSERT(live_pointer_olds_size == live_old_pointer_objects_size_); |
- ASSERT(live_codes_size == live_code_objects_size_); |
- ASSERT(live_cells_size == live_cell_objects_size_); |
- ASSERT(live_news_size == live_young_objects_size_); |
+ MARKING_INVARIANT_ST(live_maps_size == live_map_objects_size_); |
+ MARKING_INVARIANT_ST(live_data_olds_size == live_old_data_objects_size_); |
+ MARKING_INVARIANT_ST(live_pointer_olds_size == |
+ live_old_pointer_objects_size_); |
+ MARKING_INVARIANT_ST(live_codes_size == live_code_objects_size_); |
+ MARKING_INVARIANT_ST(live_cells_size == live_cell_objects_size_); |
+ MARKING_INVARIANT_ST(live_news_size == live_young_objects_size_); |
} |
@@ -2407,12 +3128,14 @@ void MarkCompactCollector::RelocateObjects() { |
USE(live_codes_size); |
USE(live_cells_size); |
USE(live_news_size); |
- ASSERT(live_maps_size == live_map_objects_size_); |
- ASSERT(live_data_olds_size == live_old_data_objects_size_); |
- ASSERT(live_pointer_olds_size == live_old_pointer_objects_size_); |
- ASSERT(live_codes_size == live_code_objects_size_); |
- ASSERT(live_cells_size == live_cell_objects_size_); |
- ASSERT(live_news_size == live_young_objects_size_); |
+ |
+ MARKING_INVARIANT_ST(live_maps_size == live_map_objects_size_); |
+ MARKING_INVARIANT_ST(live_data_olds_size == live_old_data_objects_size_); |
+ MARKING_INVARIANT_ST( |
+ live_pointer_olds_size == live_old_pointer_objects_size_); |
+ MARKING_INVARIANT_ST(live_codes_size == live_code_objects_size_); |
+ MARKING_INVARIANT_ST(live_cells_size == live_cell_objects_size_); |
+ MARKING_INVARIANT_ST(live_news_size == live_young_objects_size_); |
// Flip from and to spaces |
Heap::new_space()->Flip(); |