// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_MARK_COMPACT_H_
#define V8_MARK_COMPACT_H_

#include "src/compiler-intrinsics.h"
#include "src/spaces.h"

namespace v8 {
namespace internal {

// Callback function, returns whether an object is alive. The heap size
// of the object is returned in size. It optionally updates the offset
// to the first live object in the page (only used for old and map objects).
typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset);
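
// Illustrative sketch (not part of the original header): an IsAliveFunction
// is expected to report the object's size through *size and answer
// liveness, typically by consulting the mark bits. The helper below is
// hypothetical; it ignores the optional offset out-parameter.
//
//   static bool IsMarkedObjectAlive(HeapObject* obj, int* size, int* offset) {
//     *size = obj->Size();
//     return Marking::MarkBitFrom(obj).Get();  // Mark bit set => alive.
//   }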

// Forward declarations.
class CodeFlusher;
class MarkCompactCollector;
class MarkingVisitor;
class RootMarkingVisitor;


class Marking {
 public:
  explicit Marking(Heap* heap)
      : heap_(heap) {
  }

  INLINE(static MarkBit MarkBitFrom(Address addr));

  INLINE(static MarkBit MarkBitFrom(HeapObject* obj)) {
    return MarkBitFrom(reinterpret_cast<Address>(obj));
  }

  // Impossible markbits: 01
  static const char* kImpossibleBitPattern;
  INLINE(static bool IsImpossible(MarkBit mark_bit)) {
    return !mark_bit.Get() && mark_bit.Next().Get();
  }

  // Black markbits: 10 - this is required by the sweeper.
  static const char* kBlackBitPattern;
  INLINE(static bool IsBlack(MarkBit mark_bit)) {
    return mark_bit.Get() && !mark_bit.Next().Get();
  }

  // White markbits: 00 - this is required by the mark bit clearer.
  static const char* kWhiteBitPattern;
  INLINE(static bool IsWhite(MarkBit mark_bit)) {
    return !mark_bit.Get();
  }

  // Grey markbits: 11
  static const char* kGreyBitPattern;
  INLINE(static bool IsGrey(MarkBit mark_bit)) {
    return mark_bit.Get() && mark_bit.Next().Get();
  }

  INLINE(static void MarkBlack(MarkBit mark_bit)) {
    mark_bit.Set();
    mark_bit.Next().Clear();
  }

  INLINE(static void BlackToGrey(MarkBit markbit)) {
    markbit.Next().Set();
  }

  INLINE(static void WhiteToGrey(MarkBit markbit)) {
    markbit.Set();
    markbit.Next().Set();
  }

  INLINE(static void GreyToBlack(MarkBit markbit)) {
    markbit.Next().Clear();
  }

  INLINE(static void BlackToGrey(HeapObject* obj)) {
    BlackToGrey(MarkBitFrom(obj));
  }

  INLINE(static void AnyToGrey(MarkBit markbit)) {
    markbit.Set();
    markbit.Next().Set();
  }

  void TransferMark(Address old_start, Address new_start);

#ifdef DEBUG
  enum ObjectColor {
    BLACK_OBJECT,
    WHITE_OBJECT,
    GREY_OBJECT,
    IMPOSSIBLE_COLOR
  };

  static const char* ColorName(ObjectColor color) {
    switch (color) {
      case BLACK_OBJECT: return "black";
      case WHITE_OBJECT: return "white";
      case GREY_OBJECT: return "grey";
      case IMPOSSIBLE_COLOR: return "impossible";
    }
    return "error";
  }

  static ObjectColor Color(HeapObject* obj) {
    return Color(Marking::MarkBitFrom(obj));
  }

  static ObjectColor Color(MarkBit mark_bit) {
    if (IsBlack(mark_bit)) return BLACK_OBJECT;
    if (IsWhite(mark_bit)) return WHITE_OBJECT;
    if (IsGrey(mark_bit)) return GREY_OBJECT;
    UNREACHABLE();
    return IMPOSSIBLE_COLOR;
  }
#endif

  // Returns true if the transferred color is black.
  INLINE(static bool TransferColor(HeapObject* from,
                                   HeapObject* to)) {
    MarkBit from_mark_bit = MarkBitFrom(from);
    MarkBit to_mark_bit = MarkBitFrom(to);
    bool is_black = false;
    if (from_mark_bit.Get()) {
      to_mark_bit.Set();
      is_black = true;  // Looks black so far.
    }
    if (from_mark_bit.Next().Get()) {
      to_mark_bit.Next().Set();
      is_black = false;  // Was actually gray.
    }
    return is_black;
  }

 private:
  Heap* heap_;
};
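
// Usage sketch (illustrative, not part of the original header): during
// non-incremental marking a white object is colored grey when discovered
// and pushed on the marking deque; once popped and its fields visited, it
// turns black. `obj` and `deque` are placeholders here.
//
//   MarkBit mark_bit = Marking::MarkBitFrom(obj);
//   if (Marking::IsWhite(mark_bit)) {
//     Marking::WhiteToGrey(mark_bit);  // 00 -> 11: discovered, unscanned.
//     deque->PushGrey(obj);
//   }
//   ...
//   HeapObject* popped = deque->Pop();
//   Marking::GreyToBlack(Marking::MarkBitFrom(popped));  // 11 -> 10: scanned.
//
// The pattern 01 is never produced by these transitions, which is why it is
// labeled impossible above.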

// ----------------------------------------------------------------------------
// Marking deque for tracing live objects.
class MarkingDeque {
 public:
  MarkingDeque()
      : array_(NULL), top_(0), bottom_(0), mask_(0), overflowed_(false) { }

  void Initialize(Address low, Address high) {
    HeapObject** obj_low = reinterpret_cast<HeapObject**>(low);
    HeapObject** obj_high = reinterpret_cast<HeapObject**>(high);
    array_ = obj_low;
    mask_ = RoundDownToPowerOf2(static_cast<int>(obj_high - obj_low)) - 1;
    top_ = bottom_ = 0;
    overflowed_ = false;
  }

  inline bool IsFull() { return ((top_ + 1) & mask_) == bottom_; }

  inline bool IsEmpty() { return top_ == bottom_; }

  bool overflowed() const { return overflowed_; }

  void ClearOverflowed() { overflowed_ = false; }

  void SetOverflowed() { overflowed_ = true; }

  // Push the (marked) object on the marking stack if there is room,
  // otherwise mark the object as overflowed and wait for a rescan of the
  // heap.
  INLINE(void PushBlack(HeapObject* object)) {
    DCHECK(object->IsHeapObject());
    if (IsFull()) {
      Marking::BlackToGrey(object);
      MemoryChunk::IncrementLiveBytesFromGC(object->address(),
                                            -object->Size());
      SetOverflowed();
    } else {
      array_[top_] = object;
      top_ = ((top_ + 1) & mask_);
    }
  }

  INLINE(void PushGrey(HeapObject* object)) {
    DCHECK(object->IsHeapObject());
    if (IsFull()) {
      SetOverflowed();
    } else {
      array_[top_] = object;
      top_ = ((top_ + 1) & mask_);
    }
  }

  INLINE(HeapObject* Pop()) {
    DCHECK(!IsEmpty());
    top_ = ((top_ - 1) & mask_);
    HeapObject* object = array_[top_];
    DCHECK(object->IsHeapObject());
    return object;
  }

  INLINE(void UnshiftGrey(HeapObject* object)) {
    DCHECK(object->IsHeapObject());
    if (IsFull()) {
      SetOverflowed();
    } else {
      bottom_ = ((bottom_ - 1) & mask_);
      array_[bottom_] = object;
    }
  }

  HeapObject** array() { return array_; }
  int bottom() { return bottom_; }
  int top() { return top_; }
  int mask() { return mask_; }
  void set_top(int top) { top_ = top; }

 private:
  HeapObject** array_;
  // array_[(top_ - 1) & mask_] is the top element in the deque. The deque is
  // empty when top_ == bottom_. It is full when top_ + 1 == bottom_
  // (mod mask_ + 1).
  int top_;
  int bottom_;
  int mask_;
  bool overflowed_;

  DISALLOW_COPY_AND_ASSIGN(MarkingDeque);
};
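
// Illustrative note (not from the original header): Initialize() rounds the
// capacity down to a power of two so that wrap-around is a bit mask rather
// than a modulo. For example, with room for 1000 HeapObject* entries,
// RoundDownToPowerOf2 yields 512 and mask_ becomes 511, so indices advance
// as
//
//   top_ = (top_ + 1) & 511;  // ..., 510, 511, 0, 1, ...
//
// One slot is kept unused so that "full" (((top_ + 1) & mask_) == bottom_)
// is distinguishable from "empty" (top_ == bottom_).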


class SlotsBufferAllocator {
 public:
  SlotsBuffer* AllocateBuffer(SlotsBuffer* next_buffer);
  void DeallocateBuffer(SlotsBuffer* buffer);

  void DeallocateChain(SlotsBuffer** buffer_address);
};


// SlotsBuffer records a sequence of slots that has to be updated
// after live objects were relocated from evacuation candidates.
// All slots are either untyped or typed:
// - Untyped slots are expected to contain a tagged object pointer.
//   They are recorded by an address.
// - Typed slots are expected to contain an encoded pointer to a heap
//   object where the way of encoding depends on the type of the slot.
//   They are recorded as a pair (SlotType, slot address).
// We assume that the zero-page is never mapped; this allows us to distinguish
// untyped slots from typed slots during iteration by a simple comparison:
// if an element of the slots buffer is less than NUMBER_OF_SLOT_TYPES, then
// it is the first element of a typed slot's pair.
class SlotsBuffer {
 public:
  typedef Object** ObjectSlot;

  explicit SlotsBuffer(SlotsBuffer* next_buffer)
      : idx_(0), chain_length_(1), next_(next_buffer) {
    if (next_ != NULL) {
      chain_length_ = next_->chain_length_ + 1;
    }
  }

  ~SlotsBuffer() {
  }

  void Add(ObjectSlot slot) {
    DCHECK(0 <= idx_ && idx_ < kNumberOfElements);
    slots_[idx_++] = slot;
  }

  enum SlotType {
    EMBEDDED_OBJECT_SLOT,
    RELOCATED_CODE_OBJECT,
    CODE_TARGET_SLOT,
    CODE_ENTRY_SLOT,
    DEBUG_TARGET_SLOT,
    JS_RETURN_SLOT,
    NUMBER_OF_SLOT_TYPES
  };

  static const char* SlotTypeToString(SlotType type) {
    switch (type) {
      case EMBEDDED_OBJECT_SLOT:
        return "EMBEDDED_OBJECT_SLOT";
      case RELOCATED_CODE_OBJECT:
        return "RELOCATED_CODE_OBJECT";
      case CODE_TARGET_SLOT:
        return "CODE_TARGET_SLOT";
      case CODE_ENTRY_SLOT:
        return "CODE_ENTRY_SLOT";
      case DEBUG_TARGET_SLOT:
        return "DEBUG_TARGET_SLOT";
      case JS_RETURN_SLOT:
        return "JS_RETURN_SLOT";
      case NUMBER_OF_SLOT_TYPES:
        return "NUMBER_OF_SLOT_TYPES";
    }
    return "UNKNOWN SlotType";
  }

  void UpdateSlots(Heap* heap);

  void UpdateSlotsWithFilter(Heap* heap);

  SlotsBuffer* next() { return next_; }

  static int SizeOfChain(SlotsBuffer* buffer) {
    if (buffer == NULL) return 0;
    return static_cast<int>(buffer->idx_ +
                            (buffer->chain_length_ - 1) * kNumberOfElements);
  }

  inline bool IsFull() {
    return idx_ == kNumberOfElements;
  }

  inline bool HasSpaceForTypedSlot() {
    return idx_ < kNumberOfElements - 1;
  }

  static void UpdateSlotsRecordedIn(Heap* heap,
                                    SlotsBuffer* buffer,
                                    bool code_slots_filtering_required) {
    while (buffer != NULL) {
      if (code_slots_filtering_required) {
        buffer->UpdateSlotsWithFilter(heap);
      } else {
        buffer->UpdateSlots(heap);
      }
      buffer = buffer->next();
    }
  }

  enum AdditionMode {
    FAIL_ON_OVERFLOW,
    IGNORE_OVERFLOW
  };

  static bool ChainLengthThresholdReached(SlotsBuffer* buffer) {
    return buffer != NULL && buffer->chain_length_ >= kChainLengthThreshold;
  }

  INLINE(static bool AddTo(SlotsBufferAllocator* allocator,
                           SlotsBuffer** buffer_address,
                           ObjectSlot slot,
                           AdditionMode mode)) {
    SlotsBuffer* buffer = *buffer_address;
    if (buffer == NULL || buffer->IsFull()) {
      if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
        allocator->DeallocateChain(buffer_address);
        return false;
      }
      buffer = allocator->AllocateBuffer(buffer);
      *buffer_address = buffer;
    }
    buffer->Add(slot);
    return true;
  }

  static bool IsTypedSlot(ObjectSlot slot);

  static bool AddTo(SlotsBufferAllocator* allocator,
                    SlotsBuffer** buffer_address,
                    SlotType type,
                    Address addr,
                    AdditionMode mode);

  static const int kNumberOfElements = 1021;

 private:
  static const int kChainLengthThreshold = 15;

  intptr_t idx_;
  intptr_t chain_length_;
  SlotsBuffer* next_;
  ObjectSlot slots_[kNumberOfElements];
};
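
// Illustrative sketch (not from the original header): an untyped slot
// occupies one element (the slot address), while a typed slot occupies two
// consecutive elements, (SlotType, slot address). Since the zero-page is
// assumed unmapped, a stored value below NUMBER_OF_SLOT_TYPES can only be a
// SlotType tag, never a valid address. A hypothetical iteration over one
// buffer could therefore look like:
//
//   for (intptr_t i = 0; i < buffer->idx_; i++) {
//     ObjectSlot slot = buffer->slots_[i];
//     if (reinterpret_cast<intptr_t>(slot) < NUMBER_OF_SLOT_TYPES) {
//       SlotType type =
//           static_cast<SlotType>(reinterpret_cast<intptr_t>(slot));
//       Address addr = reinterpret_cast<Address>(buffer->slots_[++i]);
//       // ... decode the pointer at addr according to type and update it.
//     } else {
//       // Untyped: *slot holds a tagged object pointer to update in place.
//     }
//   }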


// CodeFlusher collects candidates for code flushing during marking and
// processes those candidates after marking has completed in order to
// reset those functions referencing code objects that would otherwise
// be unreachable. Code objects can be referenced in three ways:
// - SharedFunctionInfo references unoptimized code.
// - JSFunction references either unoptimized or optimized code.
// - OptimizedCodeMap references optimized code.
// We are not allowed to flush unoptimized code for functions that got
// optimized or inlined into optimized code, because we might bail out
// into the unoptimized code again during deoptimization.
class CodeFlusher {
 public:
  explicit CodeFlusher(Isolate* isolate)
      : isolate_(isolate),
        jsfunction_candidates_head_(NULL),
        shared_function_info_candidates_head_(NULL),
        optimized_code_map_holder_head_(NULL) {}

  void AddCandidate(SharedFunctionInfo* shared_info) {
    if (GetNextCandidate(shared_info) == NULL) {
      SetNextCandidate(shared_info, shared_function_info_candidates_head_);
      shared_function_info_candidates_head_ = shared_info;
    }
  }

  void AddCandidate(JSFunction* function) {
    DCHECK(function->code() == function->shared()->code());
    if (GetNextCandidate(function)->IsUndefined()) {
      SetNextCandidate(function, jsfunction_candidates_head_);
      jsfunction_candidates_head_ = function;
    }
  }

  void AddOptimizedCodeMap(SharedFunctionInfo* code_map_holder) {
    if (GetNextCodeMap(code_map_holder)->IsUndefined()) {
      SetNextCodeMap(code_map_holder, optimized_code_map_holder_head_);
      optimized_code_map_holder_head_ = code_map_holder;
    }
  }

  void EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder);
  void EvictCandidate(SharedFunctionInfo* shared_info);
  void EvictCandidate(JSFunction* function);

  void ProcessCandidates() {
    ProcessOptimizedCodeMaps();
    ProcessSharedFunctionInfoCandidates();
    ProcessJSFunctionCandidates();
  }

  void EvictAllCandidates() {
    EvictOptimizedCodeMaps();
    EvictJSFunctionCandidates();
    EvictSharedFunctionInfoCandidates();
  }

  void IteratePointersToFromSpace(ObjectVisitor* v);

 private:
  void ProcessOptimizedCodeMaps();
  void ProcessJSFunctionCandidates();
  void ProcessSharedFunctionInfoCandidates();
  void EvictOptimizedCodeMaps();
  void EvictJSFunctionCandidates();
  void EvictSharedFunctionInfoCandidates();

  static JSFunction** GetNextCandidateSlot(JSFunction* candidate) {
    return reinterpret_cast<JSFunction**>(
        HeapObject::RawField(candidate, JSFunction::kNextFunctionLinkOffset));
  }

  static JSFunction* GetNextCandidate(JSFunction* candidate) {
    Object* next_candidate = candidate->next_function_link();
    return reinterpret_cast<JSFunction*>(next_candidate);
  }

  static void SetNextCandidate(JSFunction* candidate,
                               JSFunction* next_candidate) {
    candidate->set_next_function_link(next_candidate);
  }

  static void ClearNextCandidate(JSFunction* candidate, Object* undefined) {
    DCHECK(undefined->IsUndefined());
    candidate->set_next_function_link(undefined, SKIP_WRITE_BARRIER);
  }

  static SharedFunctionInfo* GetNextCandidate(SharedFunctionInfo* candidate) {
    Object* next_candidate = candidate->code()->gc_metadata();
    return reinterpret_cast<SharedFunctionInfo*>(next_candidate);
  }

  static void SetNextCandidate(SharedFunctionInfo* candidate,
                               SharedFunctionInfo* next_candidate) {
    candidate->code()->set_gc_metadata(next_candidate);
  }

  static void ClearNextCandidate(SharedFunctionInfo* candidate) {
    candidate->code()->set_gc_metadata(NULL, SKIP_WRITE_BARRIER);
  }

  static SharedFunctionInfo* GetNextCodeMap(SharedFunctionInfo* holder) {
    FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
    Object* next_map = code_map->get(SharedFunctionInfo::kNextMapIndex);
    return reinterpret_cast<SharedFunctionInfo*>(next_map);
  }

  static void SetNextCodeMap(SharedFunctionInfo* holder,
                             SharedFunctionInfo* next_holder) {
    FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
    code_map->set(SharedFunctionInfo::kNextMapIndex, next_holder);
  }

  static void ClearNextCodeMap(SharedFunctionInfo* holder) {
    FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
    code_map->set_undefined(SharedFunctionInfo::kNextMapIndex);
  }

  Isolate* isolate_;
  JSFunction* jsfunction_candidates_head_;
  SharedFunctionInfo* shared_function_info_candidates_head_;
  SharedFunctionInfo* optimized_code_map_holder_head_;

  DISALLOW_COPY_AND_ASSIGN(CodeFlusher);
};
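
// Illustrative note (not from the original header): the candidate lists are
// intrusive, threaded through fields the candidates already own, so marking
// allocates no extra memory. JSFunction candidates are linked via
// next_function_link(), SharedFunctionInfo candidates via the gc_metadata
// slot of their code, and optimized code map holders via the kNextMapIndex
// entry of the code map itself. A traversal, as ProcessJSFunctionCandidates()
// might perform it, could look like:
//
//   JSFunction* candidate = jsfunction_candidates_head_;
//   while (candidate != NULL) {
//     JSFunction* next = GetNextCandidate(candidate);
//     // ... flush candidate->code() if it is no longer needed.
//     candidate = next;
//   }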


// Defined in isolate.h.
class ThreadLocalTop;


// -------------------------------------------------------------------------
// Mark-Compact collector
class MarkCompactCollector {
 public:
  // Set the global flags; this must be called before Prepare() to take
  // effect.
  inline void SetFlags(int flags);

  static void Initialize();

  void SetUp();

  void TearDown();

  void CollectEvacuationCandidates(PagedSpace* space);

  void AddEvacuationCandidate(Page* p);

  // Prepares for GC by resetting relocation info in old and map spaces and
  // choosing spaces to compact.
  void Prepare();

  // Performs a global garbage collection.
  void CollectGarbage();

  enum CompactionMode {
    INCREMENTAL_COMPACTION,
    NON_INCREMENTAL_COMPACTION
  };

  bool StartCompaction(CompactionMode mode);

  void AbortCompaction();

#ifdef DEBUG
  // Checks whether a mark-compact collection is in progress.
  bool in_use() { return state_ > PREPARE_GC; }
  bool are_map_pointers_encoded() { return state_ == UPDATE_POINTERS; }
#endif

  // Determine type of object and emit deletion log event.
  static void ReportDeleteIfNeeded(HeapObject* obj, Isolate* isolate);

  // Distinguishable invalid map encodings (for single word and multiple
  // words) that indicate free regions.
  static const uint32_t kSingleFreeEncoding = 0;
  static const uint32_t kMultiFreeEncoding = 1;

  static inline bool IsMarked(Object* obj);

  inline Heap* heap() const { return heap_; }
  inline Isolate* isolate() const;

  CodeFlusher* code_flusher() { return code_flusher_; }
  inline bool is_code_flushing_enabled() const {
    return code_flusher_ != NULL;
  }
  void EnableCodeFlushing(bool enable);

  enum SweeperType {
    PARALLEL_CONSERVATIVE,
    CONCURRENT_CONSERVATIVE,
    PARALLEL_PRECISE,
    CONCURRENT_PRECISE,
    PRECISE
  };

  enum SweepingParallelism {
    SWEEP_ON_MAIN_THREAD,
    SWEEP_IN_PARALLEL
  };

#ifdef VERIFY_HEAP
  void VerifyMarkbitsAreClean();
  static void VerifyMarkbitsAreClean(PagedSpace* space);
  static void VerifyMarkbitsAreClean(NewSpace* space);
  void VerifyWeakEmbeddedObjectsInCode();
  void VerifyOmittedMapChecks();
#endif

  // Sweep a single page from the given space conservatively.
  // Returns the size of the biggest contiguous freed memory chunk in bytes.
  template<SweepingParallelism type>
  static int SweepConservatively(PagedSpace* space,
                                 FreeList* free_list,
                                 Page* p);

  INLINE(static bool ShouldSkipEvacuationSlotRecording(Object** anchor)) {
    return Page::FromAddress(reinterpret_cast<Address>(anchor))->
        ShouldSkipEvacuationSlotRecording();
  }

  INLINE(static bool ShouldSkipEvacuationSlotRecording(Object* host)) {
    return Page::FromAddress(reinterpret_cast<Address>(host))->
        ShouldSkipEvacuationSlotRecording();
  }

  INLINE(static bool IsOnEvacuationCandidate(Object* obj)) {
    return Page::FromAddress(reinterpret_cast<Address>(obj))->
        IsEvacuationCandidate();
  }

  INLINE(void EvictEvacuationCandidate(Page* page)) {
    if (FLAG_trace_fragmentation) {
      PrintF("Page %p is too popular. Disabling evacuation.\n",
             reinterpret_cast<void*>(page));
    }

    // TODO(gc) If all evacuation candidates are too popular we
    // should stop slots recording entirely.
    page->ClearEvacuationCandidate();

    // We were not collecting slots on this page that point to other
    // evacuation candidates, thus we have to rescan the page after
    // evacuation to discover and update all pointers to evacuated
    // objects.
    if (page->owner()->identity() == OLD_DATA_SPACE) {
      evacuation_candidates_.RemoveElement(page);
    } else {
      page->SetFlag(Page::RESCAN_ON_EVACUATION);
    }
  }

  void RecordRelocSlot(RelocInfo* rinfo, Object* target);
  void RecordCodeEntrySlot(Address slot, Code* target);
  void RecordCodeTargetPatch(Address pc, Code* target);

  INLINE(void RecordSlot(Object** anchor_slot,
                         Object** slot,
                         Object* object,
                         SlotsBuffer::AdditionMode mode =
                             SlotsBuffer::FAIL_ON_OVERFLOW));

  void MigrateObject(HeapObject* dst,
                     HeapObject* src,
                     int size,
                     AllocationSpace to_old_space);

  bool TryPromoteObject(HeapObject* object, int object_size);

  void InvalidateCode(Code* code);

  void ClearMarkbits();

  bool abort_incremental_marking() const { return abort_incremental_marking_; }

  bool is_compacting() const { return compacting_; }

  MarkingParity marking_parity() { return marking_parity_; }

  // Concurrent and parallel sweeping support. If required_freed_bytes is
  // larger than 0, sweeping returns after a block of at least
  // required_freed_bytes has been freed. If required_freed_bytes is zero,
  // the whole given space is swept. Returns the size of the largest
  // contiguous freed memory chunk.
  int SweepInParallel(PagedSpace* space, int required_freed_bytes);

  // Sweeps a given page concurrently with the sweeper threads. Returns the
  // size of the largest contiguous freed memory chunk.
  int SweepInParallel(Page* page, PagedSpace* space);

  void EnsureSweepingCompleted();

  // If the sweeper threads are not active, this method returns true
  // immediately (if that is a latency issue we should be smarter here).
  // Otherwise, it returns true once the sweeper threads are done
  // processing the pages.
  bool IsSweepingCompleted();

  void RefillFreeList(PagedSpace* space);

  bool AreSweeperThreadsActivated();

  // Checks if sweeping is in progress right now on any space.
  bool sweeping_in_progress() { return sweeping_in_progress_; }

  void set_sequential_sweeping(bool sequential_sweeping) {
    sequential_sweeping_ = sequential_sweeping;
  }

  bool sequential_sweeping() const {
    return sequential_sweeping_;
  }

  // Mark the global table which maps weak objects to dependent code without
  // marking its contents.
  void MarkWeakObjectToCodeTable();

  // Special case for processing weak references in a full collection. We need
  // to artificially keep AllocationSites alive for a time.
  void MarkAllocationSite(AllocationSite* site);

 private:
  class SweeperTask;

  explicit MarkCompactCollector(Heap* heap);
  ~MarkCompactCollector();

  bool MarkInvalidatedCode();
  bool WillBeDeoptimized(Code* code);
  void RemoveDeadInvalidatedCode();
  void ProcessInvalidatedCode(ObjectVisitor* visitor);

  void StartSweeperThreads();

#ifdef DEBUG
  enum CollectorState {
    IDLE,
    PREPARE_GC,
    MARK_LIVE_OBJECTS,
    SWEEP_SPACES,
    ENCODE_FORWARDING_ADDRESSES,
    UPDATE_POINTERS,
    RELOCATE_OBJECTS
  };

  // The current stage of the collector.
  CollectorState state_;
#endif

  // Global flag that forces sweeping to be precise, so we can traverse the
  // heap.
  bool sweep_precisely_;

  bool reduce_memory_footprint_;

  bool abort_incremental_marking_;

  MarkingParity marking_parity_;

  // True if we are collecting slots to perform evacuation from evacuation
  // candidates.
  bool compacting_;

  bool was_marked_incrementally_;

  // True if concurrent or parallel sweeping is currently in progress.
  bool sweeping_in_progress_;

  base::Semaphore pending_sweeper_jobs_semaphore_;

  bool sequential_sweeping_;

  SlotsBufferAllocator slots_buffer_allocator_;

  SlotsBuffer* migration_slots_buffer_;

  // Finishes GC, performs heap verification if enabled.
  void Finish();

  // -----------------------------------------------------------------------
  // Phase 1: Marking live objects.
  //
  //  Before: The heap has been prepared for garbage collection by
  //          MarkCompactCollector::Prepare() and is otherwise in its
  //          normal state.
  //
  //   After: Live objects are marked and non-live objects are unmarked.

  friend class RootMarkingVisitor;
  friend class MarkingVisitor;
  friend class MarkCompactMarkingVisitor;
  friend class CodeMarkingVisitor;
  friend class SharedFunctionInfoMarkingVisitor;

  // Mark code objects that are active on the stack to prevent them
  // from being flushed.
  void PrepareThreadForCodeFlushing(Isolate* isolate, ThreadLocalTop* top);

  void PrepareForCodeFlushing();

  // Marking operations for objects reachable from roots.
  void MarkLiveObjects();

  void AfterMarking();

  // Marks the object black and pushes it on the marking stack.
  // This is for non-incremental marking only.
  INLINE(void MarkObject(HeapObject* obj, MarkBit mark_bit));

  // Marks the object black assuming that it is not yet marked.
  // This is for non-incremental marking only.
  INLINE(void SetMark(HeapObject* obj, MarkBit mark_bit));

  // Mark the heap roots and all objects reachable from them.
  void MarkRoots(RootMarkingVisitor* visitor);

  // Mark the string table specially. References to internalized strings from
  // the string table are weak.
  void MarkStringTable(RootMarkingVisitor* visitor);

  // Mark objects in implicit reference groups if their parent object
  // is marked.
  void MarkImplicitRefGroups();

  // Mark objects reachable (transitively) from objects in the marking stack
  // or overflowed in the heap.
  void ProcessMarkingDeque();

  // Mark objects reachable (transitively) from objects in the marking stack
  // or overflowed in the heap. This respects references only considered in
  // the final atomic marking pause including the following:
  // - Processing of objects reachable through Harmony WeakMaps.
  // - Objects reachable due to host application logic like object groups
  //   or implicit reference groups.
  void ProcessEphemeralMarking(ObjectVisitor* visitor);

  // If the call-site of the top optimized code was not prepared for
  // deoptimization, then treat the maps in the code as strong pointers,
  // otherwise a map can die and deoptimize the code.
  void ProcessTopOptimizedFrame(ObjectVisitor* visitor);

  // Mark objects reachable (transitively) from objects in the marking
  // stack. This function empties the marking stack, but may leave
  // overflowed objects in the heap, in which case the marking stack's
  // overflow flag will be set.
  void EmptyMarkingDeque();

  // Refill the marking stack with overflowed objects from the heap. This
  // function either leaves the marking stack full or clears the overflow
  // flag on the marking stack.
  void RefillMarkingDeque();

  // After reachable maps have been marked, process per-context object
  // literal map caches, removing unmarked entries.
  void ProcessMapCaches();

  // Callback function for telling whether the object *p is an unmarked
  // heap object.
  static bool IsUnmarkedHeapObject(Object** p);
  static bool IsUnmarkedHeapObjectWithHeap(Heap* heap, Object** p);

  // Map transitions from a live map to a dead map must be killed.
  // We replace them with a null descriptor, with the same key.
  void ClearNonLiveReferences();
  void ClearNonLivePrototypeTransitions(Map* map);
  void ClearNonLiveMapTransitions(Map* map, MarkBit map_mark);

  void ClearDependentCode(DependentCode* dependent_code);
  void ClearDependentICList(Object* head);
  void ClearNonLiveDependentCode(DependentCode* dependent_code);
  int ClearNonLiveDependentCodeInGroup(DependentCode* dependent_code,
                                       int group, int start, int end,
                                       int new_start);

  // Mark all values associated with reachable keys in weak collections
  // encountered so far. This might push new objects or even new weak maps
  // onto the marking stack.
  void ProcessWeakCollections();

  // After all reachable objects have been marked, those weak map entries
  // with an unreachable key are removed from all encountered weak maps.
  // The linked list of all encountered weak maps is destroyed.
  void ClearWeakCollections();

  // -----------------------------------------------------------------------
  // Phase 2: Sweeping to clear mark bits and free non-live objects for
  // a non-compacting collection.
  //
  //  Before: Live objects are marked and non-live objects are unmarked.
  //
  //   After: Live objects are unmarked, non-live regions have been added
  //          to their space's free list. Active eden semispace is compacted
  //          by evacuation.
  //

  // If we are not compacting the heap, we simply sweep the spaces except
  // for the large object space, clearing mark bits and adding unmarked
  // regions to each space's free list.
  void SweepSpaces();

  int DiscoverAndEvacuateBlackObjectsOnPage(NewSpace* new_space,
                                            NewSpacePage* p);

  void EvacuateNewSpace();

  void EvacuateLiveObjectsFromPage(Page* p);

  void EvacuatePages();

  void EvacuateNewSpaceAndCandidates();

  void ReleaseEvacuationCandidates();

  // Moves the pages of the evacuation_candidates_ list to the end of their
  // corresponding space pages list.
  void MoveEvacuationCandidatesToEndOfPagesList();

  void SweepSpace(PagedSpace* space, SweeperType sweeper);

  // Finalizes the parallel sweeping phase. Marks all the pages that were
  // swept in parallel.
  void ParallelSweepSpacesComplete();

  void ParallelSweepSpaceComplete(PagedSpace* space);

  // Updates store buffer and slot buffer for a pointer in a migrating object.
  void RecordMigratedSlot(Object* value, Address slot);

#ifdef DEBUG
  friend class MarkObjectVisitor;
  static void VisitObject(HeapObject* obj);

  friend class UnmarkObjectVisitor;
  static void UnmarkObject(HeapObject* obj);
#endif

  Heap* heap_;
  MarkingDeque marking_deque_;
  CodeFlusher* code_flusher_;
  bool have_code_to_deoptimize_;

  List<Page*> evacuation_candidates_;
  List<Code*> invalidated_code_;

  SmartPointer<FreeList> free_list_old_data_space_;
  SmartPointer<FreeList> free_list_old_pointer_space_;

  friend class Heap;
};


class MarkBitCellIterator BASE_EMBEDDED {
 public:
  explicit MarkBitCellIterator(MemoryChunk* chunk)
      : chunk_(chunk) {
    last_cell_index_ = Bitmap::IndexToCell(
        Bitmap::CellAlignIndex(
            chunk_->AddressToMarkbitIndex(chunk_->area_end())));
    cell_base_ = chunk_->area_start();
    cell_index_ = Bitmap::IndexToCell(
        Bitmap::CellAlignIndex(
            chunk_->AddressToMarkbitIndex(cell_base_)));
    cells_ = chunk_->markbits()->cells();
  }

  inline bool Done() { return cell_index_ == last_cell_index_; }

  inline bool HasNext() { return cell_index_ < last_cell_index_ - 1; }

  inline MarkBit::CellType* CurrentCell() {
    DCHECK(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
        chunk_->AddressToMarkbitIndex(cell_base_))));
    return &cells_[cell_index_];
  }

  inline Address CurrentCellBase() {
    DCHECK(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
        chunk_->AddressToMarkbitIndex(cell_base_))));
    return cell_base_;
  }

  inline void Advance() {
    cell_index_++;
    cell_base_ += 32 * kPointerSize;
  }

 private:
  MemoryChunk* chunk_;
  MarkBit::CellType* cells_;
  unsigned int last_cell_index_;
  unsigned int cell_index_;
  Address cell_base_;
};
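
// Usage sketch (illustrative, not from the original header): walking the
// mark bitmap of a chunk one cell at a time; each cell holds 32 mark bits,
// one per pointer-size word, and thus covers 32 * kPointerSize bytes
// starting at CurrentCellBase(). `chunk` is a placeholder.
//
//   for (MarkBitCellIterator it(chunk); !it.Done(); it.Advance()) {
//     MarkBit::CellType* cell = it.CurrentCell();
//     if (*cell == 0) continue;  // Nothing marked in this cell.
//     Address base = it.CurrentCellBase();
//     // ... decode the set bits in *cell relative to base.
//   }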


class SequentialSweepingScope BASE_EMBEDDED {
 public:
  explicit SequentialSweepingScope(MarkCompactCollector* collector)
      : collector_(collector) {
    collector_->set_sequential_sweeping(true);
  }

  ~SequentialSweepingScope() {
    collector_->set_sequential_sweeping(false);
  }

 private:
  MarkCompactCollector* collector_;
};
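
// Usage sketch (illustrative, not from the original header): the scope sets
// the collector's sequential_sweeping flag for its dynamic extent and clears
// it on destruction, RAII-style.
//
//   {
//     SequentialSweepingScope scope(heap->mark_compact_collector());
//     // ... work that must not be interleaved with parallel sweeping.
//   }  // Flag is cleared here.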


const char* AllocationSpaceName(AllocationSpace space);

} }  // namespace v8::internal

#endif  // V8_MARK_COMPACT_H_