// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#include "src/incremental-marking.h"

#include "src/code-stubs.h"
#include "src/compilation-cache.h"
#include "src/conversions.h"
#include "src/objects-visiting.h"
#include "src/objects-visiting-inl.h"

namespace v8 {
namespace internal {


IncrementalMarking::IncrementalMarking(Heap* heap)
    : heap_(heap),
      state_(STOPPED),
      marking_deque_memory_(NULL),
      marking_deque_memory_committed_(false),
      steps_count_(0),
      old_generation_space_available_at_start_of_incremental_(0),
      old_generation_space_used_at_start_of_incremental_(0),
      should_hurry_(false),
      marking_speed_(0),
      allocated_(0),
      no_marking_scope_depth_(0),
      unscanned_bytes_of_large_object_(0) {}


void IncrementalMarking::TearDown() {
  delete marking_deque_memory_;
}


void IncrementalMarking::RecordWriteSlow(HeapObject* obj,
                                         Object** slot,
                                         Object* value) {
  if (BaseRecordWrite(obj, slot, value) && slot != NULL) {
    MarkBit obj_bit = Marking::MarkBitFrom(obj);
    if (Marking::IsBlack(obj_bit)) {
      // Object is not going to be rescanned, so we need to record the slot.
      heap_->mark_compact_collector()->RecordSlot(
          HeapObject::RawField(obj, 0), slot, value);
    }
  }
}


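// Called from generated code. Tops the chunk's write barrier counter back up
// to its granularity once it has dropped below half, and credits the consumed
// amount to write_barriers_invoked_since_last_step_ so that Step() can factor
// write barrier activity into its work budget. Then records the write.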
void IncrementalMarking::RecordWriteFromCode(HeapObject* obj,
                                             Object** slot,
                                             Isolate* isolate) {
  DCHECK(obj->IsHeapObject());
  IncrementalMarking* marking = isolate->heap()->incremental_marking();

  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
  int counter = chunk->write_barrier_counter();
  if (counter < (MemoryChunk::kWriteBarrierCounterGranularity / 2)) {
    marking->write_barriers_invoked_since_last_step_ +=
        MemoryChunk::kWriteBarrierCounterGranularity -
            chunk->write_barrier_counter();
    chunk->set_write_barrier_counter(
        MemoryChunk::kWriteBarrierCounterGranularity);
  }

  marking->RecordWrite(obj, slot, *slot);
}


void IncrementalMarking::RecordCodeTargetPatch(Code* host,
                                               Address pc,
                                               HeapObject* value) {
  if (IsMarking()) {
    RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
    RecordWriteIntoCode(host, &rinfo, value);
  }
}


void IncrementalMarking::RecordCodeTargetPatch(Address pc, HeapObject* value) {
  if (IsMarking()) {
    Code* host = heap_->isolate()->inner_pointer_to_code_cache()->
        GcSafeFindCodeForInnerPointer(pc);
    RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
    RecordWriteIntoCode(host, &rinfo, value);
  }
}


void IncrementalMarking::RecordWriteOfCodeEntrySlow(JSFunction* host,
                                                    Object** slot,
                                                    Code* value) {
  if (BaseRecordWrite(host, slot, value)) {
    DCHECK(slot != NULL);
    heap_->mark_compact_collector()->
        RecordCodeEntrySlot(reinterpret_cast<Address>(slot), value);
  }
}


void IncrementalMarking::RecordWriteIntoCodeSlow(HeapObject* obj,
                                                 RelocInfo* rinfo,
                                                 Object* value) {
  MarkBit value_bit = Marking::MarkBitFrom(HeapObject::cast(value));
  if (Marking::IsWhite(value_bit)) {
    MarkBit obj_bit = Marking::MarkBitFrom(obj);
    if (Marking::IsBlack(obj_bit)) {
      BlackToGreyAndUnshift(obj, obj_bit);
      RestartIfNotMarking();
    }
    // Object is either grey or white. It will be scanned if it survives.
    return;
  }

  if (is_compacting_) {
    MarkBit obj_bit = Marking::MarkBitFrom(obj);
    if (Marking::IsBlack(obj_bit)) {
      // Object is not going to be rescanned. We need to record the slot.
      heap_->mark_compact_collector()->RecordRelocSlot(rinfo,
                                                       Code::cast(value));
    }
  }
}


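// Forces the object's colour to grey without pushing it onto the marking
// deque. A black object first has its live byte count rolled back so that the
// bytes are not counted twice when it is marked black again.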
static void MarkObjectGreyDoNotEnqueue(Object* obj) {
  if (obj->IsHeapObject()) {
    HeapObject* heap_obj = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::cast(obj));
    if (Marking::IsBlack(mark_bit)) {
      MemoryChunk::IncrementLiveBytesFromGC(heap_obj->address(),
                                            -heap_obj->Size());
    }
    Marking::AnyToGrey(mark_bit);
  }
}


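// Marks a white object black directly and updates the page's live byte count.
// Grey and black objects are left unchanged. Used for data-only objects that
// never need to go through the marking deque.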
static inline void MarkBlackOrKeepGrey(HeapObject* heap_object,
                                       MarkBit mark_bit,
                                       int size) {
  DCHECK(!Marking::IsImpossible(mark_bit));
  if (mark_bit.Get()) return;
  mark_bit.Set();
  MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(), size);
  DCHECK(Marking::IsBlack(mark_bit));
}


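// Marks a white or grey object black and updates the page's live byte count;
// an already black object is left unchanged.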
static inline void MarkBlackOrKeepBlack(HeapObject* heap_object,
                                        MarkBit mark_bit,
                                        int size) {
  DCHECK(!Marking::IsImpossible(mark_bit));
  if (Marking::IsBlack(mark_bit)) return;
  Marking::MarkBlack(mark_bit);
  MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(), size);
  DCHECK(Marking::IsBlack(mark_bit));
}


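// Marking visitor used while incremental marking is in progress. It records
// each visited slot so it can be updated if incremental compaction moves the
// referenced object, and it scans large fixed arrays in chunks, tracking the
// progress on the owning page.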
class IncrementalMarkingMarkingVisitor
    : public StaticMarkingVisitor<IncrementalMarkingMarkingVisitor> {
 public:
  static void Initialize() {
    StaticMarkingVisitor<IncrementalMarkingMarkingVisitor>::Initialize();
    table_.Register(kVisitFixedArray, &VisitFixedArrayIncremental);
    table_.Register(kVisitNativeContext, &VisitNativeContextIncremental);
    table_.Register(kVisitJSRegExp, &VisitJSRegExp);
  }

  static const int kProgressBarScanningChunk = 32 * 1024;

  static void VisitFixedArrayIncremental(Map* map, HeapObject* object) {
    MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
    // TODO(mstarzinger): Move setting of the flag to the allocation site of
    // the array. The visitor should just check the flag.
    if (FLAG_use_marking_progress_bar &&
        chunk->owner()->identity() == LO_SPACE) {
      chunk->SetFlag(MemoryChunk::HAS_PROGRESS_BAR);
    }
    if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
      Heap* heap = map->GetHeap();
      // When using a progress bar for large fixed arrays, scan only a chunk of
      // the array and try to push it onto the marking deque again until it is
      // fully scanned. Fall back to scanning it through to the end in case this
      // fails because of a full deque.
      int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
      int start_offset = Max(FixedArray::BodyDescriptor::kStartOffset,
                             chunk->progress_bar());
      int end_offset = Min(object_size,
                           start_offset + kProgressBarScanningChunk);
      int already_scanned_offset = start_offset;
      bool scan_until_end = false;
      do {
        VisitPointersWithAnchor(heap,
                                HeapObject::RawField(object, 0),
                                HeapObject::RawField(object, start_offset),
                                HeapObject::RawField(object, end_offset));
        start_offset = end_offset;
        end_offset = Min(object_size, end_offset + kProgressBarScanningChunk);
        scan_until_end = heap->incremental_marking()->marking_deque()->IsFull();
      } while (scan_until_end && start_offset < object_size);
      chunk->set_progress_bar(start_offset);
      if (start_offset < object_size) {
        heap->incremental_marking()->marking_deque()->UnshiftGrey(object);
        heap->incremental_marking()->NotifyIncompleteScanOfObject(
            object_size - (start_offset - already_scanned_offset));
      }
    } else {
      FixedArrayVisitor::Visit(map, object);
    }
  }

  static void VisitNativeContextIncremental(Map* map, HeapObject* object) {
    Context* context = Context::cast(object);

    // We will mark the cache black with a separate pass when we finish
    // marking.
    // Note that GC can happen when the context is not fully initialized,
    // so the cache can be undefined.
    Object* cache = context->get(Context::NORMALIZED_MAP_CACHE_INDEX);
    if (!cache->IsUndefined()) {
      MarkObjectGreyDoNotEnqueue(cache);
    }
    VisitNativeContext(map, context);
  }

  INLINE(static void VisitPointer(Heap* heap, Object** p)) {
    Object* obj = *p;
    if (obj->IsHeapObject()) {
      heap->mark_compact_collector()->RecordSlot(p, p, obj);
      MarkObject(heap, obj);
    }
  }

  INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
    for (Object** p = start; p < end; p++) {
      Object* obj = *p;
      if (obj->IsHeapObject()) {
        heap->mark_compact_collector()->RecordSlot(start, p, obj);
        MarkObject(heap, obj);
      }
    }
  }

  INLINE(static void VisitPointersWithAnchor(Heap* heap,
                                             Object** anchor,
                                             Object** start,
                                             Object** end)) {
    for (Object** p = start; p < end; p++) {
      Object* obj = *p;
      if (obj->IsHeapObject()) {
        heap->mark_compact_collector()->RecordSlot(anchor, p, obj);
        MarkObject(heap, obj);
      }
    }
  }

  // Marks the object grey and pushes it on the marking stack.
  INLINE(static void MarkObject(Heap* heap, Object* obj)) {
    HeapObject* heap_object = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
    if (mark_bit.data_only()) {
      MarkBlackOrKeepGrey(heap_object, mark_bit, heap_object->Size());
    } else if (Marking::IsWhite(mark_bit)) {
      heap->incremental_marking()->WhiteToGreyAndPush(heap_object, mark_bit);
    }
  }

  // Marks the object black without pushing it on the marking stack.
  // Returns true if object needed marking and false otherwise.
  INLINE(static bool MarkObjectWithoutPush(Heap* heap, Object* obj)) {
    HeapObject* heap_object = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
    if (Marking::IsWhite(mark_bit)) {
      mark_bit.Set();
      MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(),
                                            heap_object->Size());
      return true;
    }
    return false;
  }
};


class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor {
 public:
  explicit IncrementalMarkingRootMarkingVisitor(
      IncrementalMarking* incremental_marking)
      : incremental_marking_(incremental_marking) {
  }

  void VisitPointer(Object** p) {
    MarkObjectByPointer(p);
  }

  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
  }

 private:
  void MarkObjectByPointer(Object** p) {
    Object* obj = *p;
    if (!obj->IsHeapObject()) return;

    HeapObject* heap_object = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
    if (mark_bit.data_only()) {
      MarkBlackOrKeepGrey(heap_object, mark_bit, heap_object->Size());
    } else {
      if (Marking::IsWhite(mark_bit)) {
        incremental_marking_->WhiteToGreyAndPush(heap_object, mark_bit);
      }
    }
  }

  IncrementalMarking* incremental_marking_;
};


void IncrementalMarking::Initialize() {
  IncrementalMarkingMarkingVisitor::Initialize();
}


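// Sets the page flags that control the incremental write barrier, depending
// on whether marking (and incremental compaction) is currently in progress.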
void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk,
                                              bool is_marking,
                                              bool is_compacting) {
  if (is_marking) {
    chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);

    // It's difficult to filter out slots recorded for large objects.
    if (chunk->owner()->identity() == LO_SPACE &&
        chunk->size() > static_cast<size_t>(Page::kPageSize) &&
        is_compacting) {
      chunk->SetFlag(MemoryChunk::RESCAN_ON_EVACUATION);
    }
  } else if (chunk->owner()->identity() == CELL_SPACE ||
             chunk->owner()->identity() == PROPERTY_CELL_SPACE ||
             chunk->scan_on_scavenge()) {
    chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  } else {
    chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  }
}


void IncrementalMarking::SetNewSpacePageFlags(NewSpacePage* chunk,
                                              bool is_marking) {
  chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
  if (is_marking) {
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  } else {
    chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  }
  chunk->SetFlag(MemoryChunk::SCAN_ON_SCAVENGE);
}


void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    PagedSpace* space) {
  PageIterator it(space);
  while (it.has_next()) {
    Page* p = it.next();
    SetOldSpacePageFlags(p, false, false);
  }
}


void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    NewSpace* space) {
  NewSpacePageIterator it(space);
  while (it.has_next()) {
    NewSpacePage* p = it.next();
    SetNewSpacePageFlags(p, false);
  }
}


void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
  DeactivateIncrementalWriteBarrierForSpace(heap_->old_pointer_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->old_data_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->cell_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->property_cell_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->map_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->code_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());

  LargePage* lop = heap_->lo_space()->first_page();
  while (lop->is_valid()) {
    SetOldSpacePageFlags(lop, false, false);
    lop = lop->next_page();
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier(PagedSpace* space) {
  PageIterator it(space);
  while (it.has_next()) {
    Page* p = it.next();
    SetOldSpacePageFlags(p, true, is_compacting_);
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier(NewSpace* space) {
  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
  while (it.has_next()) {
    NewSpacePage* p = it.next();
    SetNewSpacePageFlags(p, true);
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier() {
  ActivateIncrementalWriteBarrier(heap_->old_pointer_space());
  ActivateIncrementalWriteBarrier(heap_->old_data_space());
  ActivateIncrementalWriteBarrier(heap_->cell_space());
  ActivateIncrementalWriteBarrier(heap_->property_cell_space());
  ActivateIncrementalWriteBarrier(heap_->map_space());
  ActivateIncrementalWriteBarrier(heap_->code_space());
  ActivateIncrementalWriteBarrier(heap_->new_space());

  LargePage* lop = heap_->lo_space()->first_page();
  while (lop->is_valid()) {
    SetOldSpacePageFlags(lop, true, is_compacting_);
    lop = lop->next_page();
  }
}


bool IncrementalMarking::WorthActivating() {
#ifndef DEBUG
  static const intptr_t kActivationThreshold = 8 * MB;
#else
  // TODO(gc) consider setting this to some low level so that some
  // debug tests run with incremental marking and some without.
  static const intptr_t kActivationThreshold = 0;
#endif
  // Only start incremental marking in a safe state: 1) when incremental
  // marking is turned on, 2) when we are currently not in a GC, and
  // 3) when we are currently not serializing or deserializing the heap.
  return FLAG_incremental_marking &&
      FLAG_incremental_marking_steps &&
      heap_->gc_state() == Heap::NOT_IN_GC &&
      !heap_->isolate()->serializer_enabled() &&
      heap_->isolate()->IsInitialized() &&
      heap_->PromotedSpaceSizeOfObjects() > kActivationThreshold;
}


void IncrementalMarking::ActivateGeneratedStub(Code* stub) {
  DCHECK(RecordWriteStub::GetMode(stub) ==
         RecordWriteStub::STORE_BUFFER_ONLY);

  if (!IsMarking()) {
    // Initially stub is generated in STORE_BUFFER_ONLY mode thus
    // we don't need to do anything if incremental marking is
    // not active.
  } else if (IsCompacting()) {
    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL_COMPACTION);
  } else {
    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL);
  }
}


static void PatchIncrementalMarkingRecordWriteStubs(
    Heap* heap, RecordWriteStub::Mode mode) {
  UnseededNumberDictionary* stubs = heap->code_stubs();

  int capacity = stubs->Capacity();
  for (int i = 0; i < capacity; i++) {
    Object* k = stubs->KeyAt(i);
    if (stubs->IsKey(k)) {
      uint32_t key = NumberToUint32(k);

      if (CodeStub::MajorKeyFromKey(key) ==
          CodeStub::RecordWrite) {
        Object* e = stubs->ValueAt(i);
        if (e->IsCode()) {
          RecordWriteStub::Patch(Code::cast(e), mode);
        }
      }
    }
  }
}


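// The marking deque is backed by a lazily allocated 4 MB block of virtual
// memory that is committed on demand and uncommitted again while incremental
// marking is stopped.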
void IncrementalMarking::EnsureMarkingDequeIsCommitted() {
  if (marking_deque_memory_ == NULL) {
    marking_deque_memory_ = new base::VirtualMemory(4 * MB);
  }
  if (!marking_deque_memory_committed_) {
    bool success = marking_deque_memory_->Commit(
        reinterpret_cast<Address>(marking_deque_memory_->address()),
        marking_deque_memory_->size(),
        false);  // Not executable.
    CHECK(success);
    marking_deque_memory_committed_ = true;
  }
}


void IncrementalMarking::UncommitMarkingDeque() {
  if (state_ == STOPPED && marking_deque_memory_committed_) {
    bool success = marking_deque_memory_->Uncommit(
        reinterpret_cast<Address>(marking_deque_memory_->address()),
        marking_deque_memory_->size());
    CHECK(success);
    marking_deque_memory_committed_ = false;
  }
}


void IncrementalMarking::Start(CompactionFlag flag) {
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Start\n");
  }
  DCHECK(FLAG_incremental_marking);
  DCHECK(FLAG_incremental_marking_steps);
  DCHECK(state_ == STOPPED);
  DCHECK(heap_->gc_state() == Heap::NOT_IN_GC);
  DCHECK(!heap_->isolate()->serializer_enabled());
  DCHECK(heap_->isolate()->IsInitialized());

  ResetStepCounters();

  if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
    StartMarking(flag);
  } else {
    if (FLAG_trace_incremental_marking) {
      PrintF("[IncrementalMarking] Start sweeping.\n");
    }
    state_ = SWEEPING;
  }

  heap_->new_space()->LowerInlineAllocationLimit(kAllocatedThreshold);
}


void IncrementalMarking::StartMarking(CompactionFlag flag) {
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Start marking\n");
  }

  is_compacting_ = !FLAG_never_compact && (flag == ALLOW_COMPACTION) &&
      heap_->mark_compact_collector()->StartCompaction(
          MarkCompactCollector::INCREMENTAL_COMPACTION);

  state_ = MARKING;

  RecordWriteStub::Mode mode = is_compacting_ ?
      RecordWriteStub::INCREMENTAL_COMPACTION : RecordWriteStub::INCREMENTAL;

  PatchIncrementalMarkingRecordWriteStubs(heap_, mode);

  EnsureMarkingDequeIsCommitted();

  // Initialize marking stack.
  Address addr = static_cast<Address>(marking_deque_memory_->address());
  size_t size = marking_deque_memory_->size();
  if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize;
  marking_deque_.Initialize(addr, addr + size);

  ActivateIncrementalWriteBarrier();

  // Marking bits are cleared by the sweeper.
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    heap_->mark_compact_collector()->VerifyMarkbitsAreClean();
  }
#endif

  heap_->CompletelyClearInstanceofCache();
  heap_->isolate()->compilation_cache()->MarkCompactPrologue();

  if (FLAG_cleanup_code_caches_at_gc) {
    // We will mark the cache black with a separate pass
    // when we finish marking.
    MarkObjectGreyDoNotEnqueue(heap_->polymorphic_code_cache());
  }

  // Mark strong roots grey.
  IncrementalMarkingRootMarkingVisitor visitor(this);
  heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);

  heap_->mark_compact_collector()->MarkWeakObjectToCodeTable();

  // Ready to start incremental marking.
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Running\n");
  }
}


void IncrementalMarking::PrepareForScavenge() {
  if (!IsMarking()) return;
  NewSpacePageIterator it(heap_->new_space()->FromSpaceStart(),
                          heap_->new_space()->FromSpaceEnd());
  while (it.has_next()) {
    Bitmap::Clear(it.next());
  }
}


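// After a scavenge the objects on the marking deque may have moved or died.
// Entries pointing into new space are rewritten to the forwarding address (or
// dropped if the object did not survive), and one-word fillers left by
// in-place array trimming are skipped.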
void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
  if (!IsMarking()) return;

  int current = marking_deque_.bottom();
  int mask = marking_deque_.mask();
  int limit = marking_deque_.top();
  HeapObject** array = marking_deque_.array();
  int new_top = current;

  Map* filler_map = heap_->one_pointer_filler_map();

  while (current != limit) {
    HeapObject* obj = array[current];
    DCHECK(obj->IsHeapObject());
    current = ((current + 1) & mask);
    if (heap_->InNewSpace(obj)) {
      MapWord map_word = obj->map_word();
      if (map_word.IsForwardingAddress()) {
        HeapObject* dest = map_word.ToForwardingAddress();
        array[new_top] = dest;
        new_top = ((new_top + 1) & mask);
        DCHECK(new_top != marking_deque_.bottom());
#ifdef DEBUG
        MarkBit mark_bit = Marking::MarkBitFrom(obj);
        DCHECK(Marking::IsGrey(mark_bit) ||
               (obj->IsFiller() && Marking::IsWhite(mark_bit)));
#endif
      }
    } else if (obj->map() != filler_map) {
      // Skip one-word filler objects that appear on the stack when we perform
      // in-place array shifts.
      array[new_top] = obj;
      new_top = ((new_top + 1) & mask);
      DCHECK(new_top != marking_deque_.bottom());
#ifdef DEBUG
      MarkBit mark_bit = Marking::MarkBitFrom(obj);
      MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
      DCHECK(Marking::IsGrey(mark_bit) ||
             (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
             (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
              Marking::IsBlack(mark_bit)));
#endif
    }
  }
  marking_deque_.set_top(new_top);
}


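// Visits the body of a single object from the marking deque: marks its map,
// iterates its fields with the incremental marking visitor, and finally turns
// the object itself black.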
void IncrementalMarking::VisitObject(Map* map, HeapObject* obj, int size) {
  MarkBit map_mark_bit = Marking::MarkBitFrom(map);
  if (Marking::IsWhite(map_mark_bit)) {
    WhiteToGreyAndPush(map, map_mark_bit);
  }

  IncrementalMarkingMarkingVisitor::IterateBody(map, obj);

  MarkBit mark_bit = Marking::MarkBitFrom(obj);
#if ENABLE_SLOW_DCHECKS
  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
  SLOW_DCHECK(Marking::IsGrey(mark_bit) ||
              (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
              (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
               Marking::IsBlack(mark_bit)));
#endif
  MarkBlackOrKeepBlack(obj, mark_bit, size);
}


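// Drains the marking deque until either it is empty or roughly
// bytes_to_process bytes of object bodies have been visited. Returns the
// number of bytes actually processed.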
intptr_t IncrementalMarking::ProcessMarkingDeque(intptr_t bytes_to_process) {
  intptr_t bytes_processed = 0;
  Map* filler_map = heap_->one_pointer_filler_map();
  while (!marking_deque_.IsEmpty() && bytes_processed < bytes_to_process) {
    HeapObject* obj = marking_deque_.Pop();

    // Explicitly skip one word fillers. Incremental markbit patterns are
    // correct only for objects that occupy at least two words.
    Map* map = obj->map();
    if (map == filler_map) continue;

    int size = obj->SizeFromMap(map);
    unscanned_bytes_of_large_object_ = 0;
    VisitObject(map, obj, size);
    int delta = (size - unscanned_bytes_of_large_object_);
    // TODO(jochen): remove after http://crbug.com/381820 is resolved.
    CHECK_LT(0, delta);
    bytes_processed += delta;
  }
  return bytes_processed;
}


void IncrementalMarking::ProcessMarkingDeque() {
  Map* filler_map = heap_->one_pointer_filler_map();
  while (!marking_deque_.IsEmpty()) {
    HeapObject* obj = marking_deque_.Pop();

    // Explicitly skip one word fillers. Incremental markbit patterns are
    // correct only for objects that occupy at least two words.
    Map* map = obj->map();
    if (map == filler_map) continue;

    VisitObject(map, obj, obj->SizeFromMap(map));
  }
}


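// Finishes marking synchronously by draining the whole marking deque. Also
// blackens the polymorphic code cache and the per-context normalized map
// caches, which were deliberately only marked grey during incremental
// marking.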
void IncrementalMarking::Hurry() {
  if (state() == MARKING) {
    double start = 0.0;
    if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
      start = base::OS::TimeCurrentMillis();
      if (FLAG_trace_incremental_marking) {
        PrintF("[IncrementalMarking] Hurry\n");
      }
    }
    // TODO(gc) hurry can mark objects it encounters black as mutator
    // was stopped.
    ProcessMarkingDeque();
    state_ = COMPLETE;
    if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
      double end = base::OS::TimeCurrentMillis();
      double delta = end - start;
      heap_->tracer()->AddMarkingTime(delta);
      if (FLAG_trace_incremental_marking) {
        PrintF("[IncrementalMarking] Complete (hurry), spent %d ms.\n",
               static_cast<int>(delta));
      }
    }
  }

  if (FLAG_cleanup_code_caches_at_gc) {
    PolymorphicCodeCache* poly_cache = heap_->polymorphic_code_cache();
    Marking::GreyToBlack(Marking::MarkBitFrom(poly_cache));
    MemoryChunk::IncrementLiveBytesFromGC(poly_cache->address(),
                                          PolymorphicCodeCache::kSize);
  }

  Object* context = heap_->native_contexts_list();
  while (!context->IsUndefined()) {
    // GC can happen when the context is not fully initialized,
    // so the cache can be undefined.
    HeapObject* cache = HeapObject::cast(
        Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX));
    if (!cache->IsUndefined()) {
      MarkBit mark_bit = Marking::MarkBitFrom(cache);
      if (Marking::IsGrey(mark_bit)) {
        Marking::GreyToBlack(mark_bit);
        MemoryChunk::IncrementLiveBytesFromGC(cache->address(), cache->Size());
      }
    }
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
  }
}


void IncrementalMarking::Abort() {
  if (IsStopped()) return;
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Aborting.\n");
  }
  heap_->new_space()->LowerInlineAllocationLimit(0);
  IncrementalMarking::set_should_hurry(false);
  ResetStepCounters();
  if (IsMarking()) {
    PatchIncrementalMarkingRecordWriteStubs(heap_,
                                            RecordWriteStub::STORE_BUFFER_ONLY);
    DeactivateIncrementalWriteBarrier();

    if (is_compacting_) {
      LargeObjectIterator it(heap_->lo_space());
      for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
        Page* p = Page::FromAddress(obj->address());
        if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
          p->ClearFlag(Page::RESCAN_ON_EVACUATION);
        }
      }
    }
  }
  heap_->isolate()->stack_guard()->ClearGC();
  state_ = STOPPED;
  is_compacting_ = false;
}


void IncrementalMarking::Finalize() {
  Hurry();
  state_ = STOPPED;
  is_compacting_ = false;
  heap_->new_space()->LowerInlineAllocationLimit(0);
  IncrementalMarking::set_should_hurry(false);
  ResetStepCounters();
  PatchIncrementalMarkingRecordWriteStubs(heap_,
                                          RecordWriteStub::STORE_BUFFER_ONLY);
  DeactivateIncrementalWriteBarrier();
  DCHECK(marking_deque_.IsEmpty());
  heap_->isolate()->stack_guard()->ClearGC();
}


void IncrementalMarking::MarkingComplete(CompletionAction action) {
  state_ = COMPLETE;
  // We will set the stack guard to request a GC now. This will mean the rest
  // of the GC gets performed as soon as possible (we can't do a GC here in a
  // record-write context). If a few things get allocated between now and
  // then, that shouldn't make us do a scavenge and keep being incremental, so
  // we set the should-hurry flag to indicate that there can't be much work
  // left to do.
  set_should_hurry(true);
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Complete (normal).\n");
  }
  if (action == GC_VIA_STACK_GUARD) {
    heap_->isolate()->stack_guard()->RequestGC();
  }
}


void IncrementalMarking::OldSpaceStep(intptr_t allocated) {
  if (IsStopped() && WorthActivating() && heap_->NextGCIsLikelyToBeFull()) {
    // TODO(hpayer): Let's play safe for now, but compaction should be
    // in principle possible.
    Start(PREVENT_COMPACTION);
  } else {
    Step(allocated * kFastMarking / kInitialMarkingSpeed, GC_VIA_STACK_GUARD);
  }
}


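// Performs one incremental marking step. The step size is derived from the
// number of bytes allocated and the write barrier activity since the last
// step, scaled by marking_speed_, which is ramped up over time and whenever
// the marker appears to be falling behind the mutator.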
void IncrementalMarking::Step(intptr_t allocated_bytes,
                              CompletionAction action) {
  if (heap_->gc_state() != Heap::NOT_IN_GC ||
      !FLAG_incremental_marking ||
      !FLAG_incremental_marking_steps ||
      (state_ != SWEEPING && state_ != MARKING)) {
    return;
  }

  allocated_ += allocated_bytes;

  if (allocated_ < kAllocatedThreshold &&
      write_barriers_invoked_since_last_step_ <
          kWriteBarriersInvokedThreshold) {
    return;
  }

  if (state_ == MARKING && no_marking_scope_depth_ > 0) return;

  {
    HistogramTimerScope incremental_marking_scope(
        heap_->isolate()->counters()->gc_incremental_marking());
    double start = base::OS::TimeCurrentMillis();

    // The marking speed is driven either by the allocation rate or by the
    // rate at which we are having to check the color of objects in the write
    // barrier. It is possible for a tight non-allocating loop to run a lot of
    // write barriers before we get here and check them (marking can only take
    // place on allocation), so to reduce the lumpiness we don't use the write
    // barriers invoked since last step directly to determine the amount of
    // work to do.
    intptr_t bytes_to_process =
        marking_speed_ *
            Max(allocated_, write_barriers_invoked_since_last_step_);
    allocated_ = 0;
    write_barriers_invoked_since_last_step_ = 0;

    bytes_scanned_ += bytes_to_process;
    intptr_t bytes_processed = 0;

    if (state_ == SWEEPING) {
      if (heap_->mark_compact_collector()->sweeping_in_progress() &&
          heap_->mark_compact_collector()->IsSweepingCompleted()) {
        heap_->mark_compact_collector()->EnsureSweepingCompleted();
      }
      if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
        bytes_scanned_ = 0;
        StartMarking(PREVENT_COMPACTION);
      }
    } else if (state_ == MARKING) {
      bytes_processed = ProcessMarkingDeque(bytes_to_process);
      if (marking_deque_.IsEmpty()) MarkingComplete(action);
    }

    steps_count_++;

    bool speed_up = false;

    if ((steps_count_ % kMarkingSpeedAccellerationInterval) == 0) {
      if (FLAG_trace_gc) {
        PrintPID("Speed up marking after %d steps\n",
                 static_cast<int>(kMarkingSpeedAccellerationInterval));
      }
      speed_up = true;
    }

    bool space_left_is_very_small =
        (old_generation_space_available_at_start_of_incremental_ < 10 * MB);

    bool only_1_nth_of_space_that_was_available_still_left =
        (SpaceLeftInOldSpace() * (marking_speed_ + 1) <
            old_generation_space_available_at_start_of_incremental_);

    if (space_left_is_very_small ||
        only_1_nth_of_space_that_was_available_still_left) {
      if (FLAG_trace_gc)
        PrintPID("Speed up marking because of low space left\n");
      speed_up = true;
    }

    bool size_of_old_space_multiplied_by_n_during_marking =
        (heap_->PromotedTotalSize() >
         (marking_speed_ + 1) *
             old_generation_space_used_at_start_of_incremental_);
    if (size_of_old_space_multiplied_by_n_during_marking) {
      speed_up = true;
      if (FLAG_trace_gc) {
        PrintPID("Speed up marking because of heap size increase\n");
      }
    }

    int64_t promoted_during_marking =
        heap_->PromotedTotalSize() -
        old_generation_space_used_at_start_of_incremental_;
    intptr_t delay = marking_speed_ * MB;
    intptr_t scavenge_slack = heap_->MaxSemiSpaceSize();

    // We try to scan at least twice the speed that we are allocating.
    if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) {
      if (FLAG_trace_gc) {
        PrintPID("Speed up marking because marker was not keeping up\n");
      }
      speed_up = true;
    }

    if (speed_up) {
      if (state_ != MARKING) {
        if (FLAG_trace_gc) {
          PrintPID("Postponing speeding up marking until marking starts\n");
        }
      } else {
        marking_speed_ += kMarkingSpeedAccelleration;
        marking_speed_ = static_cast<int>(
            Min(kMaxMarkingSpeed, static_cast<intptr_t>(marking_speed_ * 1.3)));
        if (FLAG_trace_gc) {
          PrintPID("Marking speed increased to %d\n", marking_speed_);
        }
      }
    }

    double end = base::OS::TimeCurrentMillis();
    double duration = (end - start);
    // Note that we report zero bytes here when sweeping was in progress or
    // when we just started incremental marking. In these cases we did not
    // process the marking deque.
    heap_->tracer()->AddIncrementalMarkingStep(duration, bytes_processed);
  }
}


void IncrementalMarking::ResetStepCounters() {
  steps_count_ = 0;
  old_generation_space_available_at_start_of_incremental_ =
      SpaceLeftInOldSpace();
  old_generation_space_used_at_start_of_incremental_ =
      heap_->PromotedTotalSize();
  bytes_rescanned_ = 0;
  marking_speed_ = kInitialMarkingSpeed;
  bytes_scanned_ = 0;
  write_barriers_invoked_since_last_step_ = 0;
}


int64_t IncrementalMarking::SpaceLeftInOldSpace() {
  return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSizeOfObjects();
}

} }  // namespace v8::internal