// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#include "src/base/atomicops.h"
#include "src/code-stubs.h"
#include "src/compilation-cache.h"
#include "src/cpu-profiler.h"
#include "src/deoptimizer.h"
#include "src/execution.h"
#include "src/gdb-jit.h"
#include "src/global-handles.h"
#include "src/heap-profiler.h"
#include "src/ic-inl.h"
#include "src/incremental-marking.h"
#include "src/mark-compact.h"
#include "src/objects-visiting.h"
#include "src/objects-visiting-inl.h"
#include "src/spaces-inl.h"
#include "src/stub-cache.h"
#include "src/sweeper-thread.h"

namespace v8 {
namespace internal {

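// An object's color is encoded in two consecutive mark bits, as listed
// below: white objects are unmarked, grey objects are marked but still on
// the marking deque, and black objects are marked with all fields visited.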
const char* Marking::kWhiteBitPattern = "00";
const char* Marking::kBlackBitPattern = "10";
const char* Marking::kGreyBitPattern = "11";
const char* Marking::kImpossibleBitPattern = "01";


// -------------------------------------------------------------------------
// MarkCompactCollector

MarkCompactCollector::MarkCompactCollector(Heap* heap) :  // NOLINT
#ifdef DEBUG
      state_(IDLE),
#endif
      sweep_precisely_(false),
      reduce_memory_footprint_(false),
      abort_incremental_marking_(false),
      marking_parity_(ODD_MARKING_PARITY),
      compacting_(false),
      was_marked_incrementally_(false),
      sweeping_in_progress_(false),
      pending_sweeper_jobs_semaphore_(0),
      sequential_sweeping_(false),
      migration_slots_buffer_(NULL),
      heap_(heap),
      code_flusher_(NULL),
      have_code_to_deoptimize_(false) { }

#ifdef VERIFY_HEAP
class VerifyMarkingVisitor: public ObjectVisitor {
 public:
  explicit VerifyMarkingVisitor(Heap* heap) : heap_(heap) {}

  void VisitPointers(Object** start, Object** end) {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        HeapObject* object = HeapObject::cast(*current);
        CHECK(heap_->mark_compact_collector()->IsMarked(object));
      }
    }
  }

  void VisitEmbeddedPointer(RelocInfo* rinfo) {
    DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
    if (!rinfo->host()->IsWeakObject(rinfo->target_object())) {
      Object* p = rinfo->target_object();
      VisitPointer(&p);
    }
  }

  void VisitCell(RelocInfo* rinfo) {
    Code* code = rinfo->host();
    DCHECK(rinfo->rmode() == RelocInfo::CELL);
    if (!code->IsWeakObject(rinfo->target_cell())) {
      ObjectVisitor::VisitCell(rinfo);
    }
  }

 private:
  Heap* heap_;
};

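// Verify marking in an address range: marked objects must not overlap (each
// marked object must start at or after the end of the previous one) and may
// only point to other marked objects.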
static void VerifyMarking(Heap* heap, Address bottom, Address top) {
  VerifyMarkingVisitor visitor(heap);
  HeapObject* object;
  Address next_object_must_be_here_or_later = bottom;

  for (Address current = bottom;
       current < top;
       current += kPointerSize) {
    object = HeapObject::FromAddress(current);
    if (MarkCompactCollector::IsMarked(object)) {
      CHECK(current >= next_object_must_be_here_or_later);
      object->Iterate(&visitor);
      next_object_must_be_here_or_later = current + object->Size();
    }
  }
}


static void VerifyMarking(NewSpace* space) {
  Address end = space->top();
  NewSpacePageIterator it(space->bottom(), end);
  // The bottom position is at the start of its page. Allows us to use
  // page->area_start() as start of range on all pages.
  CHECK_EQ(space->bottom(),
           NewSpacePage::FromAddress(space->bottom())->area_start());
  while (it.has_next()) {
    NewSpacePage* page = it.next();
    Address limit = it.has_next() ? page->area_end() : end;
    CHECK(limit == end || !page->Contains(end));
    VerifyMarking(space->heap(), page->area_start(), limit);
  }
}


static void VerifyMarking(PagedSpace* space) {
  PageIterator it(space);

  while (it.has_next()) {
    Page* p = it.next();
    VerifyMarking(space->heap(), p->area_start(), p->area_end());
  }
}


static void VerifyMarking(Heap* heap) {
  VerifyMarking(heap->old_pointer_space());
  VerifyMarking(heap->old_data_space());
  VerifyMarking(heap->code_space());
  VerifyMarking(heap->cell_space());
  VerifyMarking(heap->property_cell_space());
  VerifyMarking(heap->map_space());
  VerifyMarking(heap->new_space());

  VerifyMarkingVisitor visitor(heap);

  LargeObjectIterator it(heap->lo_space());
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    if (MarkCompactCollector::IsMarked(obj)) {
      obj->Iterate(&visitor);
    }
  }

  heap->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
}


class VerifyEvacuationVisitor: public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        HeapObject* object = HeapObject::cast(*current);
        CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object));
      }
    }
  }
};


static void VerifyEvacuation(Page* page) {
  VerifyEvacuationVisitor visitor;
  HeapObjectIterator iterator(page, NULL);
  for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
       heap_object = iterator.Next()) {
    // We skip free space objects.
    if (!heap_object->IsFiller()) {
      heap_object->Iterate(&visitor);
    }
  }
}


static void VerifyEvacuation(NewSpace* space) {
  NewSpacePageIterator it(space->bottom(), space->top());
  VerifyEvacuationVisitor visitor;

  while (it.has_next()) {
    NewSpacePage* page = it.next();
    Address current = page->area_start();
    Address limit = it.has_next() ? page->area_end() : space->top();
    CHECK(limit == space->top() || !page->Contains(space->top()));
    while (current < limit) {
      HeapObject* object = HeapObject::FromAddress(current);
      object->Iterate(&visitor);
      current += object->Size();
    }
  }
}


static void VerifyEvacuation(Heap* heap, PagedSpace* space) {
  if (!space->swept_precisely()) return;
  if (FLAG_use_allocation_folding &&
      (space == heap->old_pointer_space() || space == heap->old_data_space())) {
    return;
  }
  PageIterator it(space);

  while (it.has_next()) {
    Page* p = it.next();
    if (p->IsEvacuationCandidate()) continue;
    VerifyEvacuation(p);
  }
}


static void VerifyEvacuation(Heap* heap) {
  VerifyEvacuation(heap, heap->old_pointer_space());
  VerifyEvacuation(heap, heap->old_data_space());
  VerifyEvacuation(heap, heap->code_space());
  VerifyEvacuation(heap, heap->cell_space());
  VerifyEvacuation(heap, heap->property_cell_space());
  VerifyEvacuation(heap, heap->map_space());
  VerifyEvacuation(heap->new_space());

  VerifyEvacuationVisitor visitor;
  heap->IterateStrongRoots(&visitor, VISIT_ALL);
}
#endif  // VERIFY_HEAP


#ifdef DEBUG
class VerifyNativeContextSeparationVisitor: public ObjectVisitor {
 public:
  VerifyNativeContextSeparationVisitor() : current_native_context_(NULL) {}

  void VisitPointers(Object** start, Object** end) {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        HeapObject* object = HeapObject::cast(*current);
        if (object->IsString()) continue;
        switch (object->map()->instance_type()) {
          case JS_FUNCTION_TYPE:
            CheckContext(JSFunction::cast(object)->context());
            break;
          case JS_GLOBAL_PROXY_TYPE:
            CheckContext(JSGlobalProxy::cast(object)->native_context());
            break;
          case JS_GLOBAL_OBJECT_TYPE:
          case JS_BUILTINS_OBJECT_TYPE:
            CheckContext(GlobalObject::cast(object)->native_context());
            break;
          case JS_ARRAY_TYPE:
          case JS_DATE_TYPE:
          case JS_OBJECT_TYPE:
          case JS_REGEXP_TYPE:
            VisitPointer(HeapObject::RawField(object, JSObject::kMapOffset));
            break;
          case MAP_TYPE:
            VisitPointer(HeapObject::RawField(object, Map::kPrototypeOffset));
            VisitPointer(HeapObject::RawField(object, Map::kConstructorOffset));
            break;
          case FIXED_ARRAY_TYPE:
            if (object->IsContext()) {
              CheckContext(object);
            } else {
              FixedArray* array = FixedArray::cast(object);
              int length = array->length();
              // Set array length to zero to prevent cycles while iterating
              // over array bodies; this is easier than intrusive marking.
              array->set_length(0);
              array->IterateBody(
                  FIXED_ARRAY_TYPE, FixedArray::SizeFor(length), this);
              array->set_length(length);
            }
            break;
          case CELL_TYPE:
          case JS_PROXY_TYPE:
          case JS_VALUE_TYPE:
          case TYPE_FEEDBACK_INFO_TYPE:
            object->Iterate(this);
            break;
          case DECLARED_ACCESSOR_INFO_TYPE:
          case EXECUTABLE_ACCESSOR_INFO_TYPE:
          case BYTE_ARRAY_TYPE:
          case CALL_HANDLER_INFO_TYPE:
          case CODE_TYPE:
          case FIXED_DOUBLE_ARRAY_TYPE:
          case HEAP_NUMBER_TYPE:
          case MUTABLE_HEAP_NUMBER_TYPE:
          case INTERCEPTOR_INFO_TYPE:
          case ODDBALL_TYPE:
          case SCRIPT_TYPE:
          case SHARED_FUNCTION_INFO_TYPE:
            break;
          default:
            UNREACHABLE();
        }
      }
    }
  }

 private:
  void CheckContext(Object* context) {
    if (!context->IsContext()) return;
    Context* native_context = Context::cast(context)->native_context();
    if (current_native_context_ == NULL) {
      current_native_context_ = native_context;
    } else {
      CHECK_EQ(current_native_context_, native_context);
    }
  }

  Context* current_native_context_;
};


static void VerifyNativeContextSeparation(Heap* heap) {
  HeapObjectIterator it(heap->code_space());

  for (Object* object = it.Next(); object != NULL; object = it.Next()) {
    VerifyNativeContextSeparationVisitor visitor;
    Code::cast(object)->CodeIterateBody(&visitor);
  }
}
#endif


void MarkCompactCollector::SetUp() {
  free_list_old_data_space_.Reset(new FreeList(heap_->old_data_space()));
  free_list_old_pointer_space_.Reset(new FreeList(heap_->old_pointer_space()));
}


void MarkCompactCollector::TearDown() {
  AbortCompaction();
}


void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
  p->MarkEvacuationCandidate();
  evacuation_candidates_.Add(p);
}


static void TraceFragmentation(PagedSpace* space) {
  int number_of_pages = space->CountTotalPages();
  intptr_t reserved = (number_of_pages * space->AreaSize());
  intptr_t free = reserved - space->SizeOfObjects();
  PrintF("[%s]: %d pages, %d (%.1f%%) free\n",
         AllocationSpaceName(space->identity()),
         number_of_pages,
         static_cast<int>(free),
         static_cast<double>(free) * 100 / reserved);
}


bool MarkCompactCollector::StartCompaction(CompactionMode mode) {
  if (!compacting_) {
    DCHECK(evacuation_candidates_.length() == 0);

#ifdef ENABLE_GDB_JIT_INTERFACE
    // If the GDBJIT interface is active, disable compaction.
    if (FLAG_gdbjit) return false;
#endif

    CollectEvacuationCandidates(heap()->old_pointer_space());
    CollectEvacuationCandidates(heap()->old_data_space());

    if (FLAG_compact_code_space &&
        (mode == NON_INCREMENTAL_COMPACTION ||
         FLAG_incremental_code_compaction)) {
      CollectEvacuationCandidates(heap()->code_space());
    } else if (FLAG_trace_fragmentation) {
      TraceFragmentation(heap()->code_space());
    }

    if (FLAG_trace_fragmentation) {
      TraceFragmentation(heap()->map_space());
      TraceFragmentation(heap()->cell_space());
      TraceFragmentation(heap()->property_cell_space());
    }

    heap()->old_pointer_space()->EvictEvacuationCandidatesFromFreeLists();
    heap()->old_data_space()->EvictEvacuationCandidatesFromFreeLists();
    heap()->code_space()->EvictEvacuationCandidatesFromFreeLists();

    compacting_ = evacuation_candidates_.length() > 0;
  }

  return compacting_;
}


void MarkCompactCollector::CollectGarbage() {
  // Make sure that Prepare() has been called. The individual steps below will
  // update the state as they proceed.
  DCHECK(state_ == PREPARE_GC);

  MarkLiveObjects();
  DCHECK(heap_->incremental_marking()->IsStopped());

  if (FLAG_collect_maps) ClearNonLiveReferences();

  ClearWeakCollections();

#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    VerifyMarking(heap_);
  }
#endif

  SweepSpaces();

#ifdef DEBUG
  if (FLAG_verify_native_context_separation) {
    VerifyNativeContextSeparation(heap_);
  }
#endif

#ifdef VERIFY_HEAP
  if (heap()->weak_embedded_objects_verification_enabled()) {
    VerifyWeakEmbeddedObjectsInCode();
  }
  if (FLAG_collect_maps && FLAG_omit_map_checks_for_leaf_maps) {
    VerifyOmittedMapChecks();
  }
#endif

  Finish();

  if (marking_parity_ == EVEN_MARKING_PARITY) {
    marking_parity_ = ODD_MARKING_PARITY;
  } else {
    DCHECK(marking_parity_ == ODD_MARKING_PARITY);
    marking_parity_ = EVEN_MARKING_PARITY;
  }
}


#ifdef VERIFY_HEAP
void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
  PageIterator it(space);

  while (it.has_next()) {
    Page* p = it.next();
    CHECK(p->markbits()->IsClean());
    CHECK_EQ(0, p->LiveBytes());
  }
}


void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
  NewSpacePageIterator it(space->bottom(), space->top());

  while (it.has_next()) {
    NewSpacePage* p = it.next();
    CHECK(p->markbits()->IsClean());
    CHECK_EQ(0, p->LiveBytes());
  }
}


void MarkCompactCollector::VerifyMarkbitsAreClean() {
  VerifyMarkbitsAreClean(heap_->old_pointer_space());
  VerifyMarkbitsAreClean(heap_->old_data_space());
  VerifyMarkbitsAreClean(heap_->code_space());
  VerifyMarkbitsAreClean(heap_->cell_space());
  VerifyMarkbitsAreClean(heap_->property_cell_space());
  VerifyMarkbitsAreClean(heap_->map_space());
  VerifyMarkbitsAreClean(heap_->new_space());

  LargeObjectIterator it(heap_->lo_space());
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    MarkBit mark_bit = Marking::MarkBitFrom(obj);
    CHECK(Marking::IsWhite(mark_bit));
    CHECK_EQ(0, Page::FromAddress(obj->address())->LiveBytes());
  }
}


void MarkCompactCollector::VerifyWeakEmbeddedObjectsInCode() {
  HeapObjectIterator code_iterator(heap()->code_space());
  for (HeapObject* obj = code_iterator.Next();
       obj != NULL;
       obj = code_iterator.Next()) {
    Code* code = Code::cast(obj);
    if (!code->is_optimized_code() && !code->is_weak_stub()) continue;
    if (WillBeDeoptimized(code)) continue;
    code->VerifyEmbeddedObjectsDependency();
  }
}


void MarkCompactCollector::VerifyOmittedMapChecks() {
  HeapObjectIterator iterator(heap()->map_space());
  for (HeapObject* obj = iterator.Next();
       obj != NULL;
       obj = iterator.Next()) {
    Map* map = Map::cast(obj);
    map->VerifyOmittedMapChecks();
  }
}
#endif  // VERIFY_HEAP


static void ClearMarkbitsInPagedSpace(PagedSpace* space) {
  PageIterator it(space);

  while (it.has_next()) {
    Bitmap::Clear(it.next());
  }
}


static void ClearMarkbitsInNewSpace(NewSpace* space) {
  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());

  while (it.has_next()) {
    Bitmap::Clear(it.next());
  }
}


void MarkCompactCollector::ClearMarkbits() {
  ClearMarkbitsInPagedSpace(heap_->code_space());
  ClearMarkbitsInPagedSpace(heap_->map_space());
  ClearMarkbitsInPagedSpace(heap_->old_pointer_space());
  ClearMarkbitsInPagedSpace(heap_->old_data_space());
  ClearMarkbitsInPagedSpace(heap_->cell_space());
  ClearMarkbitsInPagedSpace(heap_->property_cell_space());
  ClearMarkbitsInNewSpace(heap_->new_space());

  LargeObjectIterator it(heap_->lo_space());
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
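    // Large object space has no contiguous mark bitmap to wipe, so clear each
    // object's two mark bits individually and reset the owning page's
    // progress bar and live-byte count.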
    MarkBit mark_bit = Marking::MarkBitFrom(obj);
    mark_bit.Clear();
    mark_bit.Next().Clear();
    Page::FromAddress(obj->address())->ResetProgressBar();
    Page::FromAddress(obj->address())->ResetLiveBytes();
  }
}

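// A background task that sweeps a single paged space and signals
// pending_sweeper_jobs_semaphore_ when it is done.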
class MarkCompactCollector::SweeperTask : public v8::Task {
 public:
  SweeperTask(Heap* heap, PagedSpace* space)
      : heap_(heap), space_(space) {}

  virtual ~SweeperTask() {}

 private:
  // v8::Task overrides.
  virtual void Run() V8_OVERRIDE {
    heap_->mark_compact_collector()->SweepInParallel(space_, 0);
    heap_->mark_compact_collector()->pending_sweeper_jobs_semaphore_.Signal();
  }

  Heap* heap_;
  PagedSpace* space_;

  DISALLOW_COPY_AND_ASSIGN(SweeperTask);
};


void MarkCompactCollector::StartSweeperThreads() {
  DCHECK(free_list_old_pointer_space_.get()->IsEmpty());
  DCHECK(free_list_old_data_space_.get()->IsEmpty());
  sweeping_in_progress_ = true;
  for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
    isolate()->sweeper_threads()[i]->StartSweeping();
  }
  if (FLAG_job_based_sweeping) {
    V8::GetCurrentPlatform()->CallOnBackgroundThread(
        new SweeperTask(heap(), heap()->old_data_space()),
        v8::Platform::kShortRunningTask);
    V8::GetCurrentPlatform()->CallOnBackgroundThread(
        new SweeperTask(heap(), heap()->old_pointer_space()),
        v8::Platform::kShortRunningTask);
  }
}


void MarkCompactCollector::EnsureSweepingCompleted() {
  DCHECK(sweeping_in_progress_ == true);

  // If sweeping is not completed, we try to complete it here. If we do not
  // have sweeper threads, we have to complete sweeping here, because
  // otherwise we have no reliable indicator of a swept space.
  if (!AreSweeperThreadsActivated() || !IsSweepingCompleted()) {
    SweepInParallel(heap()->paged_space(OLD_DATA_SPACE), 0);
    SweepInParallel(heap()->paged_space(OLD_POINTER_SPACE), 0);
  }

  for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
    isolate()->sweeper_threads()[i]->WaitForSweeperThread();
  }
  if (FLAG_job_based_sweeping) {
    // Wait twice for both jobs.
    pending_sweeper_jobs_semaphore_.Wait();
    pending_sweeper_jobs_semaphore_.Wait();
  }
  ParallelSweepSpacesComplete();
  sweeping_in_progress_ = false;
  RefillFreeList(heap()->paged_space(OLD_DATA_SPACE));
  RefillFreeList(heap()->paged_space(OLD_POINTER_SPACE));
  heap()->paged_space(OLD_DATA_SPACE)->ResetUnsweptFreeBytes();
  heap()->paged_space(OLD_POINTER_SPACE)->ResetUnsweptFreeBytes();

#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    VerifyEvacuation(heap_);
  }
#endif
}


bool MarkCompactCollector::IsSweepingCompleted() {
  for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
    if (!isolate()->sweeper_threads()[i]->SweepingCompleted()) {
      return false;
    }
  }

  if (FLAG_job_based_sweeping) {
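    // Poll the semaphore with a zero timeout; on success, signal again so the
    // count stays intact for EnsureSweepingCompleted() to consume.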
    if (!pending_sweeper_jobs_semaphore_.WaitFor(
            base::TimeDelta::FromSeconds(0))) {
      return false;
    }
    pending_sweeper_jobs_semaphore_.Signal();
  }

  return true;
}


void MarkCompactCollector::RefillFreeList(PagedSpace* space) {
  FreeList* free_list;

  if (space == heap()->old_pointer_space()) {
    free_list = free_list_old_pointer_space_.get();
  } else if (space == heap()->old_data_space()) {
    free_list = free_list_old_data_space_.get();
  } else {
    // Any PagedSpace might invoke RefillFreeList; only the old data and old
    // pointer spaces have per-collector free lists to refill.
    return;
  }

  intptr_t freed_bytes = space->free_list()->Concatenate(free_list);
  space->AddToAccountingStats(freed_bytes);
  space->DecrementUnsweptFreeBytes(freed_bytes);
}


bool MarkCompactCollector::AreSweeperThreadsActivated() {
  return isolate()->sweeper_threads() != NULL || FLAG_job_based_sweeping;
}


void Marking::TransferMark(Address old_start, Address new_start) {
  // This is only used when resizing an object.
  DCHECK(MemoryChunk::FromAddress(old_start) ==
         MemoryChunk::FromAddress(new_start));

  if (!heap_->incremental_marking()->IsMarking()) return;

  // If the mark doesn't move, we don't check the color of the object.
  // It doesn't matter whether the object is black, since it hasn't changed
  // size, so the adjustment to the live data count will be zero anyway.
  if (old_start == new_start) return;

  MarkBit new_mark_bit = MarkBitFrom(new_start);
  MarkBit old_mark_bit = MarkBitFrom(old_start);

#ifdef DEBUG
  ObjectColor old_color = Color(old_mark_bit);
#endif

  if (Marking::IsBlack(old_mark_bit)) {
    old_mark_bit.Clear();
    DCHECK(IsWhite(old_mark_bit));
    Marking::MarkBlack(new_mark_bit);
    return;
  } else if (Marking::IsGrey(old_mark_bit)) {
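    // Grey is encoded in two mark bits, so both must be cleared before the
    // object can be pushed as grey again at its new address.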
    old_mark_bit.Clear();
    old_mark_bit.Next().Clear();
    DCHECK(IsWhite(old_mark_bit));
    heap_->incremental_marking()->WhiteToGreyAndPush(
        HeapObject::FromAddress(new_start), new_mark_bit);
    heap_->incremental_marking()->RestartIfNotMarking();
  }

#ifdef DEBUG
  ObjectColor new_color = Color(new_mark_bit);
  DCHECK(new_color == old_color);
#endif
}


const char* AllocationSpaceName(AllocationSpace space) {
  switch (space) {
    case NEW_SPACE: return "NEW_SPACE";
    case OLD_POINTER_SPACE: return "OLD_POINTER_SPACE";
    case OLD_DATA_SPACE: return "OLD_DATA_SPACE";
    case CODE_SPACE: return "CODE_SPACE";
    case MAP_SPACE: return "MAP_SPACE";
    case CELL_SPACE: return "CELL_SPACE";
    case PROPERTY_CELL_SPACE: return "PROPERTY_CELL_SPACE";
    case LO_SPACE: return "LO_SPACE";
    default:
      UNREACHABLE();
  }

  return NULL;
}


// Returns zero for pages that have so little fragmentation that it is not
// worth defragmenting them. Otherwise a positive integer that gives an
// estimate of fragmentation on an arbitrary scale.
static int FreeListFragmentation(PagedSpace* space, Page* p) {
  // If page was not swept then there are no free list items on it.
  if (!p->WasSwept()) {
    if (FLAG_trace_fragmentation) {
      PrintF("%p [%s]: %d bytes live (unswept)\n",
             reinterpret_cast<void*>(p),
             AllocationSpaceName(space->identity()),
             p->LiveBytes());
    }
    return 0;
  }

  PagedSpace::SizeStats sizes;
  space->ObtainFreeListStatistics(p, &sizes);

  intptr_t ratio;
  intptr_t ratio_threshold;
  intptr_t area_size = space->AreaSize();
  if (space->identity() == CODE_SPACE) {
    ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 /
        area_size;
    ratio_threshold = 10;
  } else {
    ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 /
        area_size;
    ratio_threshold = 15;
  }

  if (FLAG_trace_fragmentation) {
    PrintF("%p [%s]: %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %s\n",
           reinterpret_cast<void*>(p),
           AllocationSpaceName(space->identity()),
           static_cast<int>(sizes.small_size_),
           static_cast<double>(sizes.small_size_ * 100) /
           area_size,
           static_cast<int>(sizes.medium_size_),
           static_cast<double>(sizes.medium_size_ * 100) /
           area_size,
           static_cast<int>(sizes.large_size_),
           static_cast<double>(sizes.large_size_ * 100) /
           area_size,
           static_cast<int>(sizes.huge_size_),
           static_cast<double>(sizes.huge_size_ * 100) /
           area_size,
           (ratio > ratio_threshold) ? "[fragmented]" : "");
  }

  if (FLAG_always_compact && sizes.Total() != area_size) {
    return 1;
  }

  if (ratio <= ratio_threshold) return 0;  // Not fragmented.

  return static_cast<int>(ratio - ratio_threshold);
}


void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
  DCHECK(space->identity() == OLD_POINTER_SPACE ||
         space->identity() == OLD_DATA_SPACE ||
         space->identity() == CODE_SPACE);

  static const int kMaxMaxEvacuationCandidates = 1000;
  int number_of_pages = space->CountTotalPages();
  int max_evacuation_candidates =
      static_cast<int>(std::sqrt(number_of_pages / 2.0) + 1);

  if (FLAG_stress_compaction || FLAG_always_compact) {
    max_evacuation_candidates = kMaxMaxEvacuationCandidates;
  }

  class Candidate {
   public:
    Candidate() : fragmentation_(0), page_(NULL) { }
    Candidate(int f, Page* p) : fragmentation_(f), page_(p) { }

    int fragmentation() { return fragmentation_; }
    Page* page() { return page_; }

   private:
    int fragmentation_;
    Page* page_;
  };

  enum CompactionMode {
    COMPACT_FREE_LISTS,
    REDUCE_MEMORY_FOOTPRINT
  };

  CompactionMode mode = COMPACT_FREE_LISTS;

  intptr_t reserved = number_of_pages * space->AreaSize();
  intptr_t over_reserved = reserved - space->SizeOfObjects();
  static const intptr_t kFreenessThreshold = 50;

  if (reduce_memory_footprint_ && over_reserved >= space->AreaSize()) {
    // If reduction of memory footprint was requested, we are aggressive
    // about choosing pages to free. We expect that half-empty pages
    // are easier to compact so slightly bump the limit.
    mode = REDUCE_MEMORY_FOOTPRINT;
    max_evacuation_candidates += 2;
  }

  if (over_reserved > reserved / 3 && over_reserved >= 2 * space->AreaSize()) {
    // If over-usage is very high (more than a third of the space), we
    // try to free all mostly empty pages. We expect that almost empty
    // pages are even easier to compact so bump the limit even more.
    mode = REDUCE_MEMORY_FOOTPRINT;
    max_evacuation_candidates *= 2;
  }

  if (FLAG_trace_fragmentation && mode == REDUCE_MEMORY_FOOTPRINT) {
    PrintF("Estimated over reserved memory: %.1f / %.1f MB (threshold %d), "
           "evacuation candidate limit: %d\n",
           static_cast<double>(over_reserved) / MB,
           static_cast<double>(reserved) / MB,
           static_cast<int>(kFreenessThreshold),
           max_evacuation_candidates);
  }

  intptr_t estimated_release = 0;

  Candidate candidates[kMaxMaxEvacuationCandidates];

  max_evacuation_candidates =
      Min(kMaxMaxEvacuationCandidates, max_evacuation_candidates);

  int count = 0;
  int fragmentation = 0;
  Candidate* least = NULL;

  PageIterator it(space);
  if (it.has_next()) it.next();  // Never compact the first page.

  while (it.has_next()) {
    Page* p = it.next();
    p->ClearEvacuationCandidate();

    if (FLAG_stress_compaction) {
      unsigned int counter = space->heap()->ms_count();
      uintptr_t page_number = reinterpret_cast<uintptr_t>(p) >> kPageSizeBits;
      if ((counter & 1) == (page_number & 1)) fragmentation = 1;
    } else if (mode == REDUCE_MEMORY_FOOTPRINT) {
      // Don't try to release too many pages.
      if (estimated_release >= over_reserved) {
        continue;
      }

      intptr_t free_bytes = 0;

      if (!p->WasSwept()) {
        free_bytes = (p->area_size() - p->LiveBytes());
      } else {
        PagedSpace::SizeStats sizes;
        space->ObtainFreeListStatistics(p, &sizes);
        free_bytes = sizes.Total();
      }

      int free_pct = static_cast<int>(free_bytes * 100) / p->area_size();

      if (free_pct >= kFreenessThreshold) {
        estimated_release += free_bytes;
        fragmentation = free_pct;
      } else {
        fragmentation = 0;
      }

      if (FLAG_trace_fragmentation) {
        PrintF("%p [%s]: %d (%.2f%%) free %s\n",
               reinterpret_cast<void*>(p),
               AllocationSpaceName(space->identity()),
               static_cast<int>(free_bytes),
               static_cast<double>(free_bytes * 100) / p->area_size(),
               (fragmentation > 0) ? "[fragmented]" : "");
      }
    } else {
      fragmentation = FreeListFragmentation(space, p);
    }

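    // Keep the most fragmented pages seen so far: fill the candidate array
    // first, then replace the least fragmented candidate whenever a more
    // fragmented page turns up.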
    if (fragmentation != 0) {
      if (count < max_evacuation_candidates) {
        candidates[count++] = Candidate(fragmentation, p);
      } else {
        if (least == NULL) {
          for (int i = 0; i < max_evacuation_candidates; i++) {
            if (least == NULL ||
                candidates[i].fragmentation() < least->fragmentation()) {
              least = candidates + i;
            }
          }
        }
        if (least->fragmentation() < fragmentation) {
          *least = Candidate(fragmentation, p);
          least = NULL;
        }
      }
    }
  }

  for (int i = 0; i < count; i++) {
    AddEvacuationCandidate(candidates[i].page());
  }

  if (count > 0 && FLAG_trace_fragmentation) {
    PrintF("Collected %d evacuation candidates for space %s\n",
           count,
           AllocationSpaceName(space->identity()));
  }
}


void MarkCompactCollector::AbortCompaction() {
  if (compacting_) {
    int npages = evacuation_candidates_.length();
    for (int i = 0; i < npages; i++) {
      Page* p = evacuation_candidates_[i];
      slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
      p->ClearEvacuationCandidate();
      p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
    }
    compacting_ = false;
    evacuation_candidates_.Rewind(0);
    invalidated_code_.Rewind(0);
  }
  DCHECK_EQ(0, evacuation_candidates_.length());
}


void MarkCompactCollector::Prepare() {
  was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();

#ifdef DEBUG
  DCHECK(state_ == IDLE);
  state_ = PREPARE_GC;
#endif

  DCHECK(!FLAG_never_compact || !FLAG_always_compact);

  if (sweeping_in_progress()) {
    // Instead of waiting we could also abort the sweeper threads here.
    EnsureSweepingCompleted();
  }

  // Clear marking bits if incremental marking is aborted.
  if (was_marked_incrementally_ && abort_incremental_marking_) {
    heap()->incremental_marking()->Abort();
    ClearMarkbits();
    AbortCompaction();
    was_marked_incrementally_ = false;
  }

  // Don't start compaction if we are in the middle of an incremental
  // marking cycle, because we have not collected any slots.
  if (!FLAG_never_compact && !was_marked_incrementally_) {
    StartCompaction(NON_INCREMENTAL_COMPACTION);
  }

  PagedSpaces spaces(heap());
  for (PagedSpace* space = spaces.next();
       space != NULL;
       space = spaces.next()) {
    space->PrepareForMarkCompact();
  }

#ifdef VERIFY_HEAP
  if (!was_marked_incrementally_ && FLAG_verify_heap) {
    VerifyMarkbitsAreClean();
  }
#endif
}


void MarkCompactCollector::Finish() {
#ifdef DEBUG
  DCHECK(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
  state_ = IDLE;
#endif
  // The stub cache is not traversed during GC; clear the cache to
  // force lazy re-initialization of it. This must be done after the
  // GC, because it relies on the new address of certain old space
  // objects (empty string, illegal builtin).
  isolate()->stub_cache()->Clear();

  if (have_code_to_deoptimize_) {
    // Some code objects were marked for deoptimization during the GC.
    Deoptimizer::DeoptimizeMarkedCode(isolate());
    have_code_to_deoptimize_ = false;
  }
}


// -------------------------------------------------------------------------
// Phase 1: tracing and marking live objects.
//   before: all objects are in normal state.
//   after: a live object's map pointer is marked as '00'.

// Marking all live objects in the heap as part of mark-sweep or mark-compact
// collection. Before marking, all objects are in their normal state. After
// marking, live objects' map pointers are marked indicating that the object
// has been found reachable.
//
// The marking algorithm is a (mostly) depth-first (because of possible stack
// overflow) traversal of the graph of objects reachable from the roots. It
// uses an explicit stack of pointers rather than recursion. The young
// generation's inactive ('from') space is used as a marking stack. The
// objects in the marking stack are the ones that have been reached and marked
// but their children have not yet been visited.
//
// The marking stack can overflow during traversal. In that case, we set an
// overflow flag. When the overflow flag is set, we continue marking objects
// reachable from the objects on the marking stack, but no longer push them on
// the marking stack. Instead, we mark them as both marked and overflowed.
// When the stack is in the overflowed state, objects marked as overflowed
// have been reached and marked but their children have not been visited yet.
// After emptying the marking stack, we clear the overflow flag and traverse
// the heap looking for objects marked as overflowed, push them on the stack,
// and continue with marking. This process repeats until all reachable
// objects have been marked.

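// Code flushing candidates are processed after marking: any function whose
// code object was not marked is reset to the lazy-compile stub so that the
// unused code can be collected.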
void CodeFlusher::ProcessJSFunctionCandidates() {
  Code* lazy_compile =
      isolate_->builtins()->builtin(Builtins::kCompileUnoptimized);
  Object* undefined = isolate_->heap()->undefined_value();

  JSFunction* candidate = jsfunction_candidates_head_;
  JSFunction* next_candidate;
  while (candidate != NULL) {
    next_candidate = GetNextCandidate(candidate);
    ClearNextCandidate(candidate, undefined);

    SharedFunctionInfo* shared = candidate->shared();

    Code* code = shared->code();
    MarkBit code_mark = Marking::MarkBitFrom(code);
    if (!code_mark.Get()) {
      if (FLAG_trace_code_flushing && shared->is_compiled()) {
        PrintF("[code-flushing clears: ");
        shared->ShortPrint();
        PrintF(" - age: %d]\n", code->GetAge());
      }
      shared->set_code(lazy_compile);
      candidate->set_code(lazy_compile);
    } else {
      candidate->set_code(code);
    }

    // We are in the middle of a GC cycle so the write barrier in the code
    // setter did not record the slot update and we have to do that manually.
    Address slot = candidate->address() + JSFunction::kCodeEntryOffset;
    Code* target = Code::cast(Code::GetObjectFromEntryAddress(slot));
    isolate_->heap()->mark_compact_collector()->
        RecordCodeEntrySlot(slot, target);

    Object** shared_code_slot =
        HeapObject::RawField(shared, SharedFunctionInfo::kCodeOffset);
    isolate_->heap()->mark_compact_collector()->
        RecordSlot(shared_code_slot, shared_code_slot, *shared_code_slot);

    candidate = next_candidate;
  }

  jsfunction_candidates_head_ = NULL;
}


void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
  Code* lazy_compile =
      isolate_->builtins()->builtin(Builtins::kCompileUnoptimized);

  SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
  SharedFunctionInfo* next_candidate;
  while (candidate != NULL) {
    next_candidate = GetNextCandidate(candidate);
    ClearNextCandidate(candidate);

    Code* code = candidate->code();
    MarkBit code_mark = Marking::MarkBitFrom(code);
    if (!code_mark.Get()) {
      if (FLAG_trace_code_flushing && candidate->is_compiled()) {
        PrintF("[code-flushing clears: ");
        candidate->ShortPrint();
        PrintF(" - age: %d]\n", code->GetAge());
      }
      candidate->set_code(lazy_compile);
    }

    Object** code_slot =
        HeapObject::RawField(candidate, SharedFunctionInfo::kCodeOffset);
    isolate_->heap()->mark_compact_collector()->
        RecordSlot(code_slot, code_slot, *code_slot);

    candidate = next_candidate;
  }

  shared_function_info_candidates_head_ = NULL;
}


void CodeFlusher::ProcessOptimizedCodeMaps() {
  STATIC_ASSERT(SharedFunctionInfo::kEntryLength == 4);

  SharedFunctionInfo* holder = optimized_code_map_holder_head_;
  SharedFunctionInfo* next_holder;

  while (holder != NULL) {
    next_holder = GetNextCodeMap(holder);
    ClearNextCodeMap(holder);

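    // Compact the code map in place, retaining only entries whose cached
    // code object is still marked; surviving entries are copied toward the
    // front of the array.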
    FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
    int new_length = SharedFunctionInfo::kEntriesStart;
    int old_length = code_map->length();
    for (int i = SharedFunctionInfo::kEntriesStart;
         i < old_length;
         i += SharedFunctionInfo::kEntryLength) {
      Code* code =
          Code::cast(code_map->get(i + SharedFunctionInfo::kCachedCodeOffset));
      if (!Marking::MarkBitFrom(code).Get()) continue;

      // Move every slot in the entry.
      for (int j = 0; j < SharedFunctionInfo::kEntryLength; j++) {
        int dst_index = new_length++;
        Object** slot = code_map->RawFieldOfElementAt(dst_index);
        Object* object = code_map->get(i + j);
        code_map->set(dst_index, object);
        if (j == SharedFunctionInfo::kOsrAstIdOffset) {
          DCHECK(object->IsSmi());
        } else {
          DCHECK(Marking::IsBlack(
              Marking::MarkBitFrom(HeapObject::cast(*slot))));
          isolate_->heap()->mark_compact_collector()->
              RecordSlot(slot, slot, *slot);
        }
      }
    }

    // Trim the optimized code map if entries have been removed.
    if (new_length < old_length) {
      holder->TrimOptimizedCodeMap(old_length - new_length);
    }

    holder = next_holder;
  }

  optimized_code_map_holder_head_ = NULL;
}


void CodeFlusher::EvictCandidate(SharedFunctionInfo* shared_info) {
  // Make sure previous flushing decisions are revisited.
  isolate_->heap()->incremental_marking()->RecordWrites(shared_info);

  if (FLAG_trace_code_flushing) {
    PrintF("[code-flushing abandons function-info: ");
    shared_info->ShortPrint();
    PrintF("]\n");
  }

  SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
  SharedFunctionInfo* next_candidate;
  if (candidate == shared_info) {
    next_candidate = GetNextCandidate(shared_info);
    shared_function_info_candidates_head_ = next_candidate;
    ClearNextCandidate(shared_info);
  } else {
    while (candidate != NULL) {
      next_candidate = GetNextCandidate(candidate);

      if (next_candidate == shared_info) {
        next_candidate = GetNextCandidate(shared_info);
        SetNextCandidate(candidate, next_candidate);
        ClearNextCandidate(shared_info);
        break;
      }

      candidate = next_candidate;
    }
  }
}


void CodeFlusher::EvictCandidate(JSFunction* function) {
  DCHECK(!function->next_function_link()->IsUndefined());
  Object* undefined = isolate_->heap()->undefined_value();

  // Make sure previous flushing decisions are revisited.
  isolate_->heap()->incremental_marking()->RecordWrites(function);
  isolate_->heap()->incremental_marking()->RecordWrites(function->shared());

  if (FLAG_trace_code_flushing) {
    PrintF("[code-flushing abandons closure: ");
    function->shared()->ShortPrint();
    PrintF("]\n");
  }

  JSFunction* candidate = jsfunction_candidates_head_;
  JSFunction* next_candidate;
  if (candidate == function) {
    next_candidate = GetNextCandidate(function);
    jsfunction_candidates_head_ = next_candidate;
    ClearNextCandidate(function, undefined);
  } else {
    while (candidate != NULL) {
      next_candidate = GetNextCandidate(candidate);

      if (next_candidate == function) {
        next_candidate = GetNextCandidate(function);
        SetNextCandidate(candidate, next_candidate);
        ClearNextCandidate(function, undefined);
        break;
      }

      candidate = next_candidate;
    }
  }
}


void CodeFlusher::EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder) {
  DCHECK(!FixedArray::cast(code_map_holder->optimized_code_map())->
         get(SharedFunctionInfo::kNextMapIndex)->IsUndefined());

  // Make sure previous flushing decisions are revisited.
  isolate_->heap()->incremental_marking()->RecordWrites(code_map_holder);

  if (FLAG_trace_code_flushing) {
    PrintF("[code-flushing abandons code-map: ");
    code_map_holder->ShortPrint();
    PrintF("]\n");
  }

  SharedFunctionInfo* holder = optimized_code_map_holder_head_;
  SharedFunctionInfo* next_holder;
  if (holder == code_map_holder) {
    next_holder = GetNextCodeMap(code_map_holder);
    optimized_code_map_holder_head_ = next_holder;
    ClearNextCodeMap(code_map_holder);
  } else {
    while (holder != NULL) {
      next_holder = GetNextCodeMap(holder);

      if (next_holder == code_map_holder) {
        next_holder = GetNextCodeMap(code_map_holder);
        SetNextCodeMap(holder, next_holder);
        ClearNextCodeMap(code_map_holder);
        break;
      }

      holder = next_holder;
    }
  }
}


void CodeFlusher::EvictJSFunctionCandidates() {
  JSFunction* candidate = jsfunction_candidates_head_;
  JSFunction* next_candidate;
  while (candidate != NULL) {
    next_candidate = GetNextCandidate(candidate);
    EvictCandidate(candidate);
    candidate = next_candidate;
  }
  DCHECK(jsfunction_candidates_head_ == NULL);
}


void CodeFlusher::EvictSharedFunctionInfoCandidates() {
  SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
  SharedFunctionInfo* next_candidate;
  while (candidate != NULL) {
    next_candidate = GetNextCandidate(candidate);
    EvictCandidate(candidate);
    candidate = next_candidate;
  }
  DCHECK(shared_function_info_candidates_head_ == NULL);
}


void CodeFlusher::EvictOptimizedCodeMaps() {
  SharedFunctionInfo* holder = optimized_code_map_holder_head_;
  SharedFunctionInfo* next_holder;
  while (holder != NULL) {
    next_holder = GetNextCodeMap(holder);
    EvictOptimizedCodeMap(holder);
    holder = next_holder;
  }
  DCHECK(optimized_code_map_holder_head_ == NULL);
}

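// Visit every link in the candidate list that points into from space so that
// the scavenger can relocate the listed functions.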
void CodeFlusher::IteratePointersToFromSpace(ObjectVisitor* v) {
  Heap* heap = isolate_->heap();

  JSFunction** slot = &jsfunction_candidates_head_;
  JSFunction* candidate = jsfunction_candidates_head_;
  while (candidate != NULL) {
    if (heap->InFromSpace(candidate)) {
      v->VisitPointer(reinterpret_cast<Object**>(slot));
    }
    candidate = GetNextCandidate(*slot);
    slot = GetNextCandidateSlot(*slot);
  }
}


MarkCompactCollector::~MarkCompactCollector() {
  if (code_flusher_ != NULL) {
    delete code_flusher_;
    code_flusher_ = NULL;
  }
}


static inline HeapObject* ShortCircuitConsString(Object** p) {
  // Optimization: If the heap object pointed to by p is a non-internalized
  // cons string whose right substring is HEAP->empty_string, update
  // it in place to its left substring. Return the updated value.
  //
  // Here we assume that if we change *p, we replace it with a heap object
  // (i.e., the left substring of a cons string is always a heap object).
  //
  // The check performed is:
  //   object->IsConsString() && !object->IsInternalizedString() &&
  //   (ConsString::cast(object)->second() == HEAP->empty_string())
  // except the maps for the object and its possible substrings might be
  // marked.
  HeapObject* object = HeapObject::cast(*p);
  if (!FLAG_clever_optimizations) return object;
  Map* map = object->map();
  InstanceType type = map->instance_type();
  if (!IsShortcutCandidate(type)) return object;

  Object* second = reinterpret_cast<ConsString*>(object)->second();
  Heap* heap = map->GetHeap();
  if (second != heap->empty_string()) {
    return object;
  }

  // Since we don't have the object's start, it is impossible to update the
  // page dirty marks. Therefore, we only replace the string with its left
  // substring when page dirty marks do not change.
  Object* first = reinterpret_cast<ConsString*>(object)->first();
  if (!heap->InNewSpace(object) && heap->InNewSpace(first)) return object;

  *p = first;
  return HeapObject::cast(first);
}


class MarkCompactMarkingVisitor
    : public StaticMarkingVisitor<MarkCompactMarkingVisitor> {
 public:
  static void ObjectStatsVisitBase(StaticVisitorBase::VisitorId id,
                                   Map* map, HeapObject* obj);

  static void ObjectStatsCountFixedArray(
      FixedArrayBase* fixed_array,
      FixedArraySubInstanceType fast_type,
      FixedArraySubInstanceType dictionary_type);

  template<MarkCompactMarkingVisitor::VisitorId id>
  class ObjectStatsTracker {
   public:
    static inline void Visit(Map* map, HeapObject* obj);
  };

  static void Initialize();

  INLINE(static void VisitPointer(Heap* heap, Object** p)) {
    MarkObjectByPointer(heap->mark_compact_collector(), p, p);
  }

  INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
    // Mark all objects pointed to in [start, end).
    const int kMinRangeForMarkingRecursion = 64;
    if (end - start >= kMinRangeForMarkingRecursion) {
      if (VisitUnmarkedObjects(heap, start, end)) return;
      // We are close to a stack overflow, so just mark the objects.
    }
    MarkCompactCollector* collector = heap->mark_compact_collector();
    for (Object** p = start; p < end; p++) {
      MarkObjectByPointer(collector, start, p);
    }
  }

  // Marks the object black and pushes it on the marking stack.
  INLINE(static void MarkObject(Heap* heap, HeapObject* object)) {
    MarkBit mark = Marking::MarkBitFrom(object);
    heap->mark_compact_collector()->MarkObject(object, mark);
  }

  // Marks the object black without pushing it on the marking stack.
  // Returns true if object needed marking and false otherwise.
  INLINE(static bool MarkObjectWithoutPush(Heap* heap, HeapObject* object)) {
    MarkBit mark_bit = Marking::MarkBitFrom(object);
    if (!mark_bit.Get()) {
      heap->mark_compact_collector()->SetMark(object, mark_bit);
      return true;
    }
    return false;
  }

  // Mark object pointed to by p.
  INLINE(static void MarkObjectByPointer(MarkCompactCollector* collector,
                                         Object** anchor_slot,
                                         Object** p)) {
    if (!(*p)->IsHeapObject()) return;
    HeapObject* object = ShortCircuitConsString(p);
    collector->RecordSlot(anchor_slot, p, object);
    MarkBit mark = Marking::MarkBitFrom(object);
    collector->MarkObject(object, mark);
  }


  // Visit an unmarked object.
  INLINE(static void VisitUnmarkedObject(MarkCompactCollector* collector,
                                         HeapObject* obj)) {
#ifdef DEBUG
    DCHECK(collector->heap()->Contains(obj));
    DCHECK(!collector->heap()->mark_compact_collector()->IsMarked(obj));
#endif
    Map* map = obj->map();
    Heap* heap = obj->GetHeap();
    MarkBit mark = Marking::MarkBitFrom(obj);
    heap->mark_compact_collector()->SetMark(obj, mark);
    // Mark the map pointer and the body.
    MarkBit map_mark = Marking::MarkBitFrom(map);
    heap->mark_compact_collector()->MarkObject(map, map_mark);
    IterateBody(map, obj);
  }

  // Visit all unmarked objects pointed to by [start, end).
  // Returns false if the operation fails (lack of stack space).
  INLINE(static bool VisitUnmarkedObjects(Heap* heap,
                                          Object** start,
                                          Object** end)) {
    // Return false if we are close to the stack limit.
    StackLimitCheck check(heap->isolate());
    if (check.HasOverflowed()) return false;

    MarkCompactCollector* collector = heap->mark_compact_collector();
    // Visit the unmarked objects.
    for (Object** p = start; p < end; p++) {
      Object* o = *p;
      if (!o->IsHeapObject()) continue;
      collector->RecordSlot(start, p, o);
      HeapObject* obj = HeapObject::cast(o);
      MarkBit mark = Marking::MarkBitFrom(obj);
      if (mark.Get()) continue;
      VisitUnmarkedObject(collector, obj);
    }
    return true;
  }

 private:
  template<int id>
  static inline void TrackObjectStatsAndVisit(Map* map, HeapObject* obj);

  // Code flushing support.

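  // Number of mark-sweep GCs that a regexp's compiled code may go unused
  // before it is flushed.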
  static const int kRegExpCodeThreshold = 5;

  static void UpdateRegExpCodeAgeAndFlush(Heap* heap,
                                          JSRegExp* re,
                                          bool is_ascii) {
    // Make sure that the fixed array is in fact initialized on the RegExp.
    // We could potentially trigger a GC when initializing the RegExp.
    if (HeapObject::cast(re->data())->map()->instance_type() !=
        FIXED_ARRAY_TYPE) return;

    // Make sure this is a RegExp that actually contains code.
    if (re->TypeTag() != JSRegExp::IRREGEXP) return;

    Object* code = re->DataAt(JSRegExp::code_index(is_ascii));
    if (!code->IsSmi() &&
        HeapObject::cast(code)->map()->instance_type() == CODE_TYPE) {
      // Save a copy that can be reinstated if we need the code again.
      re->SetDataAt(JSRegExp::saved_code_index(is_ascii), code);

      // Saving a copy might create a pointer into a compaction candidate
      // that was not observed by the marker. This might happen if the
      // JSRegExp data was marked through the compilation cache before the
      // marker reached the JSRegExp object.
      FixedArray* data = FixedArray::cast(re->data());
      Object** slot = data->data_start() + JSRegExp::saved_code_index(is_ascii);
      heap->mark_compact_collector()->
          RecordSlot(slot, slot, code);

      // Set a number in the 0-255 range to guarantee no smi overflow.
      re->SetDataAt(JSRegExp::code_index(is_ascii),
                    Smi::FromInt(heap->sweep_generation() & 0xff));
    } else if (code->IsSmi()) {
      int value = Smi::cast(code)->value();
      // The regexp has not been compiled yet or there was a compilation error.
      if (value == JSRegExp::kUninitializedValue ||
          value == JSRegExp::kCompilationErrorValue) {
        return;
      }

      // Check if we should flush now.
      if (value == ((heap->sweep_generation() - kRegExpCodeThreshold) & 0xff)) {
        re->SetDataAt(JSRegExp::code_index(is_ascii),
                      Smi::FromInt(JSRegExp::kUninitializedValue));
        re->SetDataAt(JSRegExp::saved_code_index(is_ascii),
                      Smi::FromInt(JSRegExp::kUninitializedValue));
      }
    }
  }

  // Works by setting the current sweep_generation (as a smi) in place of the
  // code object in the RegExp's data array, keeping a copy around that can
  // be reinstated if we reuse the RegExp before flushing. If the code was
  // not used for kRegExpCodeThreshold mark-sweep GCs, we flush it.
  static void VisitRegExpAndFlushCode(Map* map, HeapObject* object) {
    Heap* heap = map->GetHeap();
    MarkCompactCollector* collector = heap->mark_compact_collector();
    if (!collector->is_code_flushing_enabled()) {
      VisitJSRegExp(map, object);
      return;
    }
    JSRegExp* re = reinterpret_cast<JSRegExp*>(object);
1546 // Flush code or set age on both ASCII and two byte code. | |
1547 UpdateRegExpCodeAgeAndFlush(heap, re, true); | |
1548 UpdateRegExpCodeAgeAndFlush(heap, re, false); | |
1549 // Visit the fields of the RegExp, including the updated FixedArray. | |
1550 VisitJSRegExp(map, object); | |
1551 } | |
1552 | |
1553 static VisitorDispatchTable<Callback> non_count_table_; | |
1554 }; | |
1555 | |
1556 | |
1557 void MarkCompactMarkingVisitor::ObjectStatsCountFixedArray( | |
1558 FixedArrayBase* fixed_array, | |
1559 FixedArraySubInstanceType fast_type, | |
1560 FixedArraySubInstanceType dictionary_type) { | |
1561 Heap* heap = fixed_array->map()->GetHeap(); | |
1562 if (fixed_array->map() != heap->fixed_cow_array_map() && | |
1563 fixed_array->map() != heap->fixed_double_array_map() && | |
1564 fixed_array != heap->empty_fixed_array()) { | |
1565 if (fixed_array->IsDictionary()) { | |
1566 heap->RecordFixedArraySubTypeStats(dictionary_type, | |
1567 fixed_array->Size()); | |
1568 } else { | |
1569 heap->RecordFixedArraySubTypeStats(fast_type, | |
1570 fixed_array->Size()); | |
1571 } | |
1572 } | |
1573 } | |
1574 | |
1575 | |
1576 void MarkCompactMarkingVisitor::ObjectStatsVisitBase( | |
1577 MarkCompactMarkingVisitor::VisitorId id, Map* map, HeapObject* obj) { | |
1578 Heap* heap = map->GetHeap(); | |
1579 int object_size = obj->Size(); | |
1580 heap->RecordObjectStats(map->instance_type(), object_size); | |
1581 non_count_table_.GetVisitorById(id)(map, obj); | |
1582 if (obj->IsJSObject()) { | |
1583 JSObject* object = JSObject::cast(obj); | |
1584 ObjectStatsCountFixedArray(object->elements(), | |
1585 DICTIONARY_ELEMENTS_SUB_TYPE, | |
1586 FAST_ELEMENTS_SUB_TYPE); | |
1587 ObjectStatsCountFixedArray(object->properties(), | |
1588 DICTIONARY_PROPERTIES_SUB_TYPE, | |
1589 FAST_PROPERTIES_SUB_TYPE); | |
1590 } | |
1591 } | |
1592 | |
1593 | |
1594 template<MarkCompactMarkingVisitor::VisitorId id> | |
1595 void MarkCompactMarkingVisitor::ObjectStatsTracker<id>::Visit( | |
1596 Map* map, HeapObject* obj) { | |
1597 ObjectStatsVisitBase(id, map, obj); | |
1598 } | |
1599 | |
1600 | |
1601 template<> | |
1602 class MarkCompactMarkingVisitor::ObjectStatsTracker< | |
1603 MarkCompactMarkingVisitor::kVisitMap> { | |
1604 public: | |
1605 static inline void Visit(Map* map, HeapObject* obj) { | |
1606 Heap* heap = map->GetHeap(); | |
1607 Map* map_obj = Map::cast(obj); | |
1608 DCHECK(map->instance_type() == MAP_TYPE); | |
1609 DescriptorArray* array = map_obj->instance_descriptors(); | |
1610 if (map_obj->owns_descriptors() && | |
1611 array != heap->empty_descriptor_array()) { | |
1612 int fixed_array_size = array->Size(); | |
1613 heap->RecordFixedArraySubTypeStats(DESCRIPTOR_ARRAY_SUB_TYPE, | |
1614 fixed_array_size); | |
1615 } | |
1616 if (map_obj->HasTransitionArray()) { | |
1617 int fixed_array_size = map_obj->transitions()->Size(); | |
1618 heap->RecordFixedArraySubTypeStats(TRANSITION_ARRAY_SUB_TYPE, | |
1619 fixed_array_size); | |
1620 } | |
1621 if (map_obj->has_code_cache()) { | |
1622 CodeCache* cache = CodeCache::cast(map_obj->code_cache()); | |
1623 heap->RecordFixedArraySubTypeStats(MAP_CODE_CACHE_SUB_TYPE, | |
1624 cache->default_cache()->Size()); | |
1625 if (!cache->normal_type_cache()->IsUndefined()) { | |
1626 heap->RecordFixedArraySubTypeStats( | |
1627 MAP_CODE_CACHE_SUB_TYPE, | |
1628 FixedArray::cast(cache->normal_type_cache())->Size()); | |
1629 } | |
1630 } | |
1631 ObjectStatsVisitBase(kVisitMap, map, obj); | |
1632 } | |
1633 }; | |
1634 | |
1635 | |
1636 template<> | |
1637 class MarkCompactMarkingVisitor::ObjectStatsTracker< | |
1638 MarkCompactMarkingVisitor::kVisitCode> { | |
1639 public: | |
1640 static inline void Visit(Map* map, HeapObject* obj) { | |
1641 Heap* heap = map->GetHeap(); | |
1642 int object_size = obj->Size(); | |
1643 DCHECK(map->instance_type() == CODE_TYPE); | |
1644 Code* code_obj = Code::cast(obj); | |
1645 heap->RecordCodeSubTypeStats(code_obj->kind(), code_obj->GetRawAge(), | |
1646 object_size); | |
1647 ObjectStatsVisitBase(kVisitCode, map, obj); | |
1648 } | |
1649 }; | |
1650 | |
1651 | |
1652 template<> | |
1653 class MarkCompactMarkingVisitor::ObjectStatsTracker< | |
1654 MarkCompactMarkingVisitor::kVisitSharedFunctionInfo> { | |
1655 public: | |
1656 static inline void Visit(Map* map, HeapObject* obj) { | |
1657 Heap* heap = map->GetHeap(); | |
1658 SharedFunctionInfo* sfi = SharedFunctionInfo::cast(obj); | |
1659 if (sfi->scope_info() != heap->empty_fixed_array()) { | |
1660 heap->RecordFixedArraySubTypeStats( | |
1661 SCOPE_INFO_SUB_TYPE, | |
1662 FixedArray::cast(sfi->scope_info())->Size()); | |
1663 } | |
1664 ObjectStatsVisitBase(kVisitSharedFunctionInfo, map, obj); | |
1665 } | |
1666 }; | |
1667 | |
1668 | |
1669 template<> | |
1670 class MarkCompactMarkingVisitor::ObjectStatsTracker< | |
1671 MarkCompactMarkingVisitor::kVisitFixedArray> { | |
1672 public: | |
1673 static inline void Visit(Map* map, HeapObject* obj) { | |
1674 Heap* heap = map->GetHeap(); | |
1675 FixedArray* fixed_array = FixedArray::cast(obj); | |
1676 if (fixed_array == heap->string_table()) { | |
1677 heap->RecordFixedArraySubTypeStats( | |
1678 STRING_TABLE_SUB_TYPE, | |
1679 fixed_array->Size()); | |
1680 } | |
1681 ObjectStatsVisitBase(kVisitFixedArray, map, obj); | |
1682 } | |
1683 }; | |
1684 | |
1685 | |
1686 void MarkCompactMarkingVisitor::Initialize() { | |
1687 StaticMarkingVisitor<MarkCompactMarkingVisitor>::Initialize(); | |
1688 | |
1689 table_.Register(kVisitJSRegExp, | |
1690 &VisitRegExpAndFlushCode); | |
1691 | |
1692 if (FLAG_track_gc_object_stats) { | |
1693 // Copy the visitor table to make call-through possible. | |
1694 non_count_table_.CopyFrom(&table_); | |
1695 #define VISITOR_ID_COUNT_FUNCTION(id) \ | |
1696 table_.Register(kVisit##id, ObjectStatsTracker<kVisit##id>::Visit); | |
1697 VISITOR_ID_LIST(VISITOR_ID_COUNT_FUNCTION) | |
1698 #undef VISITOR_ID_COUNT_FUNCTION | |
1699 } | |
1700 } | |
1701 | |
1702 | |
1703 VisitorDispatchTable<MarkCompactMarkingVisitor::Callback> | |
1704 MarkCompactMarkingVisitor::non_count_table_; | |
1705 | |
1706 | |
1707 class CodeMarkingVisitor : public ThreadVisitor { | |
1708 public: | |
1709 explicit CodeMarkingVisitor(MarkCompactCollector* collector) | |
1710 : collector_(collector) {} | |
1711 | |
1712 void VisitThread(Isolate* isolate, ThreadLocalTop* top) { | |
1713 collector_->PrepareThreadForCodeFlushing(isolate, top); | |
1714 } | |
1715 | |
1716 private: | |
1717 MarkCompactCollector* collector_; | |
1718 }; | |
1719 | |
1720 | |
1721 class SharedFunctionInfoMarkingVisitor : public ObjectVisitor { | |
1722 public: | |
1723 explicit SharedFunctionInfoMarkingVisitor(MarkCompactCollector* collector) | |
1724 : collector_(collector) {} | |
1725 | |
1726 void VisitPointers(Object** start, Object** end) { | |
1727 for (Object** p = start; p < end; p++) VisitPointer(p); | |
1728 } | |
1729 | |
1730 void VisitPointer(Object** slot) { | |
1731 Object* obj = *slot; | |
1732 if (obj->IsSharedFunctionInfo()) { | |
1733 SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj); | |
1734 MarkBit shared_mark = Marking::MarkBitFrom(shared); | |
1735 MarkBit code_mark = Marking::MarkBitFrom(shared->code()); | |
1736 collector_->MarkObject(shared->code(), code_mark); | |
1737 collector_->MarkObject(shared, shared_mark); | |
1738 } | |
1739 } | |
1740 | |
1741 private: | |
1742 MarkCompactCollector* collector_; | |
1743 }; | |
1744 | |
1745 | |
1746 void MarkCompactCollector::PrepareThreadForCodeFlushing(Isolate* isolate, | |
1747 ThreadLocalTop* top) { | |
1748 for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) { | |
1749 // Note: for the frame that has a pending lazy deoptimization | |
1750 // StackFrame::unchecked_code will return a non-optimized code object for | |
1751 // the outermost function and StackFrame::LookupCode will return | |
1752 // actual optimized code object. | |
1753 StackFrame* frame = it.frame(); | |
1754 Code* code = frame->unchecked_code(); | |
1755 MarkBit code_mark = Marking::MarkBitFrom(code); | |
1756 MarkObject(code, code_mark); | |
1757 if (frame->is_optimized()) { | |
1758 MarkCompactMarkingVisitor::MarkInlinedFunctionsCode(heap(), | |
1759 frame->LookupCode()); | |
1760 } | |
1761 } | |
1762 } | |
1763 | |
1764 | |
1765 void MarkCompactCollector::PrepareForCodeFlushing() { | |
1766 // Enable code flushing for non-incremental cycles. | |
1767 if (FLAG_flush_code && !FLAG_flush_code_incrementally) { | |
1768 EnableCodeFlushing(!was_marked_incrementally_); | |
1769 } | |
1770 | |
1771 // If code flushing is disabled, there is no need to prepare for it. | |
1772 if (!is_code_flushing_enabled()) return; | |
1773 | |
1774 // Ensure that empty descriptor array is marked. Method MarkDescriptorArray | |
1775 // relies on it being marked before any other descriptor array. | |
1776 HeapObject* descriptor_array = heap()->empty_descriptor_array(); | |
1777 MarkBit descriptor_array_mark = Marking::MarkBitFrom(descriptor_array); | |
1778 MarkObject(descriptor_array, descriptor_array_mark); | |
1779 | |
1780 // Make sure we are not referencing the code from the stack. | |
1781 DCHECK(this == heap()->mark_compact_collector()); | |
1782 PrepareThreadForCodeFlushing(heap()->isolate(), | |
1783 heap()->isolate()->thread_local_top()); | |
1784 | |
1785 // Iterate the archived stacks in all threads to check if | |
1786 // the code is referenced. | |
1787 CodeMarkingVisitor code_marking_visitor(this); | |
1788 heap()->isolate()->thread_manager()->IterateArchivedThreads( | |
1789 &code_marking_visitor); | |
1790 | |
1791 SharedFunctionInfoMarkingVisitor visitor(this); | |
1792 heap()->isolate()->compilation_cache()->IterateFunctions(&visitor); | |
1793 heap()->isolate()->handle_scope_implementer()->Iterate(&visitor); | |
1794 | |
1795 ProcessMarkingDeque(); | |
1796 } | |
1797 | |
1798 | |
1799 // Visitor class for marking heap roots. | |
1800 class RootMarkingVisitor : public ObjectVisitor { | |
1801 public: | |
1802 explicit RootMarkingVisitor(Heap* heap) | |
1803 : collector_(heap->mark_compact_collector()) { } | |
1804 | |
1805 void VisitPointer(Object** p) { | |
1806 MarkObjectByPointer(p); | |
1807 } | |
1808 | |
1809 void VisitPointers(Object** start, Object** end) { | |
1810 for (Object** p = start; p < end; p++) MarkObjectByPointer(p); | |
1811 } | |
1812 | |
1813 // Skip the weak next code link in a code object, which is visited in | |
1814 // ProcessTopOptimizedFrame. | |
1815 void VisitNextCodeLink(Object** p) { } | |
1816 | |
1817 private: | |
1818 void MarkObjectByPointer(Object** p) { | |
1819 if (!(*p)->IsHeapObject()) return; | |
1820 | |
1821 // Replace flat cons strings in place. | |
1822 HeapObject* object = ShortCircuitConsString(p); | |
1823 MarkBit mark_bit = Marking::MarkBitFrom(object); | |
1824 if (mark_bit.Get()) return; | |
1825 | |
1826 Map* map = object->map(); | |
1827 // Mark the object. | |
1828 collector_->SetMark(object, mark_bit); | |
1829 | |
1830 // Mark the map pointer and body, and push them on the marking stack. | |
1831 MarkBit map_mark = Marking::MarkBitFrom(map); | |
1832 collector_->MarkObject(map, map_mark); | |
1833 MarkCompactMarkingVisitor::IterateBody(map, object); | |
1834 | |
1835 // Mark all the objects reachable from the map and body. May leave | |
1836 // overflowed objects in the heap. | |
1837 collector_->EmptyMarkingDeque(); | |
1838 } | |
1839 | |
1840 MarkCompactCollector* collector_; | |
1841 }; | |
1842 | |
1843 | |
1844 // Helper class for pruning the string table. | |
1845 template<bool finalize_external_strings> | |
1846 class StringTableCleaner : public ObjectVisitor { | |
1847 public: | |
1848 explicit StringTableCleaner(Heap* heap) | |
1849 : heap_(heap), pointers_removed_(0) { } | |
1850 | |
1851 virtual void VisitPointers(Object** start, Object** end) { | |
1852 // Visit all HeapObject pointers in [start, end). | |
1853 for (Object** p = start; p < end; p++) { | |
1854 Object* o = *p; | |
1855 if (o->IsHeapObject() && | |
1856 !Marking::MarkBitFrom(HeapObject::cast(o)).Get()) { | |
1857 if (finalize_external_strings) { | |
1858 DCHECK(o->IsExternalString()); | |
1859 heap_->FinalizeExternalString(String::cast(*p)); | |
1860 } else { | |
1861 pointers_removed_++; | |
1862 } | |
1863 // Set the entry to the_hole_value (as deleted). | |
1864 *p = heap_->the_hole_value(); | |
1865 } | |
1866 } | |
1867 } | |
1868 | |
1869 int PointersRemoved() { | |
1870 DCHECK(!finalize_external_strings); | |
1871 return pointers_removed_; | |
1872 } | |
1873 | |
1874 private: | |
1875 Heap* heap_; | |
1876 int pointers_removed_; | |
1877 }; | |
1878 | |
1879 | |
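// The internalized-string cleaner counts the entries it clears so the table's
// element count can be adjusted afterwards; the external-string cleaner
// instead finalizes the external payload of each dead string before clearing
// the entry.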
typedef StringTableCleaner<false> InternalizedStringTableCleaner;
typedef StringTableCleaner<true> ExternalStringTableCleaner;


// Implementation of WeakObjectRetainer for mark compact GCs. All marked
// objects are retained.
class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
 public:
  virtual Object* RetainAs(Object* object) {
    if (Marking::MarkBitFrom(HeapObject::cast(object)).Get()) {
      return object;
    } else if (object->IsAllocationSite() &&
               !(AllocationSite::cast(object)->IsZombie())) {
      // "dead" AllocationSites need to live long enough for a traversal of
      // new space. These sites get a one-time reprieve.
      AllocationSite* site = AllocationSite::cast(object);
      site->MarkZombie();
      site->GetHeap()->mark_compact_collector()->MarkAllocationSite(site);
      return object;
    } else {
      return NULL;
    }
  }
};


// Fill the marking stack with overflowed objects returned by the given
// iterator. Stop when the marking stack is filled or the end of the space
// is reached, whichever comes first.
template<class T>
static void DiscoverGreyObjectsWithIterator(Heap* heap,
                                            MarkingDeque* marking_deque,
                                            T* it) {
  // The caller should ensure that the marking stack is initially not full,
  // so that we don't waste effort pointlessly scanning for objects.
  DCHECK(!marking_deque->IsFull());

  Map* filler_map = heap->one_pointer_filler_map();
  for (HeapObject* object = it->Next();
       object != NULL;
       object = it->Next()) {
    MarkBit markbit = Marking::MarkBitFrom(object);
    if ((object->map() != filler_map) && Marking::IsGrey(markbit)) {
      Marking::GreyToBlack(markbit);
      MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size());
      marking_deque->PushBlack(object);
      if (marking_deque->IsFull()) return;
    }
  }
}


static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts);


static void DiscoverGreyObjectsOnPage(MarkingDeque* marking_deque,
                                      MemoryChunk* p) {
  DCHECK(!marking_deque->IsFull());
  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
  DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
  DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);

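  // Grey objects carry the bit pattern "11": two consecutive set mark bits.
  // ANDing a cell with itself shifted right by one therefore leaves a set
  // bit exactly at the first bit of every grey object; for an object whose
  // pattern straddles the cell boundary, the low bit of the next cell
  // supplies the missing second bit.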
  for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
    Address cell_base = it.CurrentCellBase();
    MarkBit::CellType* cell = it.CurrentCell();

    const MarkBit::CellType current_cell = *cell;
    if (current_cell == 0) continue;

    MarkBit::CellType grey_objects;
    if (it.HasNext()) {
      const MarkBit::CellType next_cell = *(cell + 1);
      grey_objects = current_cell &
          ((current_cell >> 1) | (next_cell << (Bitmap::kBitsPerCell - 1)));
    } else {
      grey_objects = current_cell & (current_cell >> 1);
    }

    int offset = 0;
    while (grey_objects != 0) {
      int trailing_zeros = CompilerIntrinsics::CountTrailingZeros(grey_objects);
      grey_objects >>= trailing_zeros;
      offset += trailing_zeros;
      MarkBit markbit(cell, 1 << offset, false);
      DCHECK(Marking::IsGrey(markbit));
      Marking::GreyToBlack(markbit);
      Address addr = cell_base + offset * kPointerSize;
      HeapObject* object = HeapObject::FromAddress(addr);
      MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size());
      marking_deque->PushBlack(object);
      if (marking_deque->IsFull()) return;
      // Grey objects occupy two bits, so skip both.
      offset += 2;
      grey_objects >>= 2;
    }
  }
}


int MarkCompactCollector::DiscoverAndEvacuateBlackObjectsOnPage(
    NewSpace* new_space,
    NewSpacePage* p) {
  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
  DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
  DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);

  int survivors_size = 0;

  for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
    Address cell_base = it.CurrentCellBase();
    MarkBit::CellType* cell = it.CurrentCell();

    MarkBit::CellType current_cell = *cell;
    if (current_cell == 0) continue;

    int offset = 0;
    while (current_cell != 0) {
      int trailing_zeros = CompilerIntrinsics::CountTrailingZeros(current_cell);
      current_cell >>= trailing_zeros;
      offset += trailing_zeros;
      Address address = cell_base + offset * kPointerSize;
      HeapObject* object = HeapObject::FromAddress(address);

      int size = object->Size();
      survivors_size += size;

      Heap::UpdateAllocationSiteFeedback(object, Heap::RECORD_SCRATCHPAD_SLOT);

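      // A black object has the bit pattern "10", so advancing a single bit
      // position is enough here: the zero bit that follows is skipped by
      // CountTrailingZeros on the next loop iteration.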
      offset++;
      current_cell >>= 1;

      // TODO(hpayer): Refactor EvacuateObject and call this function instead.
      if (heap()->ShouldBePromoted(object->address(), size) &&
          TryPromoteObject(object, size)) {
        continue;
      }

      AllocationResult allocation = new_space->AllocateRaw(size);
      if (allocation.IsRetry()) {
        if (!new_space->AddFreshPage()) {
          // Shouldn't happen. We are sweeping linearly, and to-space
          // has the same number of pages as from-space, so there is
          // always room.
          UNREACHABLE();
        }
        allocation = new_space->AllocateRaw(size);
        DCHECK(!allocation.IsRetry());
      }
      Object* target = allocation.ToObjectChecked();

      MigrateObject(HeapObject::cast(target),
                    object,
                    size,
                    NEW_SPACE);
      heap()->IncrementSemiSpaceCopiedObjectSize(size);
    }
    // Clear the marking bits for the cell we just processed.
    *cell = 0;
  }
  return survivors_size;
}


static void DiscoverGreyObjectsInSpace(Heap* heap,
                                       MarkingDeque* marking_deque,
                                       PagedSpace* space) {
  if (space->swept_precisely()) {
    HeapObjectIterator it(space);
    DiscoverGreyObjectsWithIterator(heap, marking_deque, &it);
  } else {
    PageIterator it(space);
    while (it.has_next()) {
      Page* p = it.next();
      DiscoverGreyObjectsOnPage(marking_deque, p);
      if (marking_deque->IsFull()) return;
    }
  }
}


static void DiscoverGreyObjectsInNewSpace(Heap* heap,
                                          MarkingDeque* marking_deque) {
  NewSpace* space = heap->new_space();
  NewSpacePageIterator it(space->bottom(), space->top());
  while (it.has_next()) {
    NewSpacePage* page = it.next();
    DiscoverGreyObjectsOnPage(marking_deque, page);
    if (marking_deque->IsFull()) return;
  }
}


bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
  Object* o = *p;
  if (!o->IsHeapObject()) return false;
  HeapObject* heap_object = HeapObject::cast(o);
  MarkBit mark = Marking::MarkBitFrom(heap_object);
  return !mark.Get();
}


bool MarkCompactCollector::IsUnmarkedHeapObjectWithHeap(Heap* heap,
                                                        Object** p) {
  Object* o = *p;
  DCHECK(o->IsHeapObject());
  HeapObject* heap_object = HeapObject::cast(o);
  MarkBit mark = Marking::MarkBitFrom(heap_object);
  return !mark.Get();
}


void MarkCompactCollector::MarkStringTable(RootMarkingVisitor* visitor) {
  StringTable* string_table = heap()->string_table();
  // Mark the string table itself.
  MarkBit string_table_mark = Marking::MarkBitFrom(string_table);
  if (!string_table_mark.Get()) {
    // The string table could have already been marked by visiting the
    // handles list.
    SetMark(string_table, string_table_mark);
  }
  // Explicitly mark the prefix.
  string_table->IteratePrefix(visitor);
  ProcessMarkingDeque();
}


void MarkCompactCollector::MarkAllocationSite(AllocationSite* site) {
  MarkBit mark_bit = Marking::MarkBitFrom(site);
  SetMark(site, mark_bit);
}


void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
  // Mark the heap roots including global variables, stack variables,
  // etc., and all objects reachable from them.
  heap()->IterateStrongRoots(visitor, VISIT_ONLY_STRONG);

  // Handle the string table specially.
  MarkStringTable(visitor);

  MarkWeakObjectToCodeTable();

  // There may be overflowed objects in the heap. Visit them now.
  while (marking_deque_.overflowed()) {
    RefillMarkingDeque();
    EmptyMarkingDeque();
  }
}


void MarkCompactCollector::MarkImplicitRefGroups() {
  List<ImplicitRefGroup*>* ref_groups =
      isolate()->global_handles()->implicit_ref_groups();

  int last = 0;
  for (int i = 0; i < ref_groups->length(); i++) {
    ImplicitRefGroup* entry = ref_groups->at(i);
    DCHECK(entry != NULL);

    if (!IsMarked(*entry->parent)) {
      (*ref_groups)[last++] = entry;
      continue;
    }

    Object*** children = entry->children;
    // The parent object is marked, so mark all child heap objects.
    for (size_t j = 0; j < entry->length; ++j) {
      if ((*children[j])->IsHeapObject()) {
        HeapObject* child = HeapObject::cast(*children[j]);
        MarkBit mark = Marking::MarkBitFrom(child);
        MarkObject(child, mark);
      }
    }

    // Once the entire group has been marked, dispose it because it's
    // not needed anymore.
    delete entry;
  }
  ref_groups->Rewind(last);
}


void MarkCompactCollector::MarkWeakObjectToCodeTable() {
  HeapObject* weak_object_to_code_table =
      HeapObject::cast(heap()->weak_object_to_code_table());
  if (!IsMarked(weak_object_to_code_table)) {
    MarkBit mark = Marking::MarkBitFrom(weak_object_to_code_table);
    SetMark(weak_object_to_code_table, mark);
  }
}


// Mark all objects reachable from the objects on the marking stack.
// Before: the marking stack contains zero or more heap object pointers.
// After: the marking stack is empty, and all objects reachable from the
// marking stack have been marked, or are overflowed in the heap.
void MarkCompactCollector::EmptyMarkingDeque() {
  while (!marking_deque_.IsEmpty()) {
    HeapObject* object = marking_deque_.Pop();
    DCHECK(object->IsHeapObject());
    DCHECK(heap()->Contains(object));
    DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));

    Map* map = object->map();
    MarkBit map_mark = Marking::MarkBitFrom(map);
    MarkObject(map, map_mark);

    MarkCompactMarkingVisitor::IterateBody(map, object);
  }
}


// Sweep the heap for overflowed objects, clear their overflow bits, and
// push them on the marking stack. Stop early if the marking stack fills
// before sweeping completes. If sweeping completes, there are no remaining
// overflowed objects in the heap, so the overflow flag on the marking stack
// is cleared.
void MarkCompactCollector::RefillMarkingDeque() {
  DCHECK(marking_deque_.overflowed());

  DiscoverGreyObjectsInNewSpace(heap(), &marking_deque_);
  if (marking_deque_.IsFull()) return;

  DiscoverGreyObjectsInSpace(heap(),
                             &marking_deque_,
                             heap()->old_pointer_space());
  if (marking_deque_.IsFull()) return;

  DiscoverGreyObjectsInSpace(heap(),
                             &marking_deque_,
                             heap()->old_data_space());
  if (marking_deque_.IsFull()) return;

  DiscoverGreyObjectsInSpace(heap(),
                             &marking_deque_,
                             heap()->code_space());
  if (marking_deque_.IsFull()) return;

  DiscoverGreyObjectsInSpace(heap(),
                             &marking_deque_,
                             heap()->map_space());
  if (marking_deque_.IsFull()) return;

  DiscoverGreyObjectsInSpace(heap(),
                             &marking_deque_,
                             heap()->cell_space());
  if (marking_deque_.IsFull()) return;

  DiscoverGreyObjectsInSpace(heap(),
                             &marking_deque_,
                             heap()->property_cell_space());
  if (marking_deque_.IsFull()) return;

  LargeObjectIterator lo_it(heap()->lo_space());
  DiscoverGreyObjectsWithIterator(heap(),
                                  &marking_deque_,
                                  &lo_it);
  if (marking_deque_.IsFull()) return;

  marking_deque_.ClearOverflowed();
}


// Mark all objects reachable (transitively) from objects on the marking
// stack. Before: the marking stack contains zero or more heap object
// pointers. After: the marking stack is empty and there are no overflowed
// objects in the heap.
void MarkCompactCollector::ProcessMarkingDeque() {
  EmptyMarkingDeque();
  while (marking_deque_.overflowed()) {
    RefillMarkingDeque();
    EmptyMarkingDeque();
  }
}


// Mark all objects reachable (transitively) from objects on the marking
// stack, including references that are only considered in the atomic
// marking pause.
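// This is a fixed-point iteration: marking a weak-collection value or an
// implicit-ref-group child can in turn make more group parents and
// weak-collection keys reachable, so we repeat until a full round leaves the
// marking deque empty.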
void MarkCompactCollector::ProcessEphemeralMarking(ObjectVisitor* visitor) {
  bool work_to_do = true;
  DCHECK(marking_deque_.IsEmpty());
  while (work_to_do) {
    isolate()->global_handles()->IterateObjectGroups(
        visitor, &IsUnmarkedHeapObjectWithHeap);
    MarkImplicitRefGroups();
    ProcessWeakCollections();
    work_to_do = !marking_deque_.IsEmpty();
    ProcessMarkingDeque();
  }
}


void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
  for (StackFrameIterator it(isolate(), isolate()->thread_local_top());
       !it.done(); it.Advance()) {
    if (it.frame()->type() == StackFrame::JAVA_SCRIPT) {
      return;
    }
    if (it.frame()->type() == StackFrame::OPTIMIZED) {
      Code* code = it.frame()->LookupCode();
      if (!code->CanDeoptAt(it.frame()->pc())) {
        code->CodeIterateBody(visitor);
      }
      ProcessMarkingDeque();
      return;
    }
  }
}
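// Visiting the body of the topmost optimized frame's code above keeps that
// code strongly marked: if execution cannot deoptimize at the current pc,
// the code object must survive this GC even though optimized code is
// otherwise treated weakly.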


void MarkCompactCollector::MarkLiveObjects() {
  GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_MARK);
  double start_time = 0.0;
  if (FLAG_print_cumulative_gc_stat) {
    start_time = base::OS::TimeCurrentMillis();
  }
  // The recursive GC marker detects when it is nearing stack overflow,
  // and switches to a different marking system. JS interrupts interfere
  // with the C stack limit check.
  PostponeInterruptsScope postpone(isolate());

  bool incremental_marking_overflowed = false;
  IncrementalMarking* incremental_marking = heap_->incremental_marking();
  if (was_marked_incrementally_) {
    // Finalize the incremental marking and check whether we had an overflow.
    // Both markers use grey color to mark overflowed objects, so the
    // non-incremental marker can deal with them as if the overflow had
    // occurred during normal marking. But the incremental marker uses a
    // separate marking deque, so we have to copy its overflow state
    // explicitly.
    incremental_marking->Finalize();
    incremental_marking_overflowed =
        incremental_marking->marking_deque()->overflowed();
    incremental_marking->marking_deque()->ClearOverflowed();
  } else {
    // Abort any pending incremental activities, e.g. incremental sweeping.
    incremental_marking->Abort();
  }

#ifdef DEBUG
  DCHECK(state_ == PREPARE_GC);
  state_ = MARK_LIVE_OBJECTS;
#endif
  // The to-space contains live objects; a page in from-space is used as the
  // marking stack.
  Address marking_deque_start = heap()->new_space()->FromSpacePageLow();
  Address marking_deque_end = heap()->new_space()->FromSpacePageHigh();
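  // With --force-marking-deque-overflows the deque is deliberately limited
  // to 64 slots below, so that the overflow and refill paths are exercised.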
  if (FLAG_force_marking_deque_overflows) {
    marking_deque_end = marking_deque_start + 64 * kPointerSize;
  }
  marking_deque_.Initialize(marking_deque_start,
                            marking_deque_end);
  DCHECK(!marking_deque_.overflowed());

  if (incremental_marking_overflowed) {
    // There are overflowed objects left in the heap after incremental marking.
    marking_deque_.SetOverflowed();
  }

  PrepareForCodeFlushing();

  if (was_marked_incrementally_) {
    // There is no write barrier on cells, so we have to scan them now, at the
    // end of incremental marking.
    {
      HeapObjectIterator cell_iterator(heap()->cell_space());
      HeapObject* cell;
      while ((cell = cell_iterator.Next()) != NULL) {
        DCHECK(cell->IsCell());
        if (IsMarked(cell)) {
          int offset = Cell::kValueOffset;
          MarkCompactMarkingVisitor::VisitPointer(
              heap(),
              reinterpret_cast<Object**>(cell->address() + offset));
        }
      }
    }
    {
      HeapObjectIterator js_global_property_cell_iterator(
          heap()->property_cell_space());
      HeapObject* cell;
      while ((cell = js_global_property_cell_iterator.Next()) != NULL) {
        DCHECK(cell->IsPropertyCell());
        if (IsMarked(cell)) {
          MarkCompactMarkingVisitor::VisitPropertyCell(cell->map(), cell);
        }
      }
    }
  }

  RootMarkingVisitor root_visitor(heap());
  MarkRoots(&root_visitor);

  ProcessTopOptimizedFrame(&root_visitor);

  // The objects reachable from the roots are marked, yet unreachable
  // objects are unmarked. Mark objects reachable due to host
  // application-specific logic or through Harmony weak maps.
  ProcessEphemeralMarking(&root_visitor);

  // The objects reachable from the roots, weak maps or object groups
  // are marked, yet unreachable objects are unmarked. Mark objects
  // reachable only from weak global handles.
  //
  // First we identify nonlive weak handles and mark them as pending
  // destruction.
  heap()->isolate()->global_handles()->IdentifyWeakHandles(
      &IsUnmarkedHeapObject);
  // Then we mark the objects and process the transitive closure.
  heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
  while (marking_deque_.overflowed()) {
    RefillMarkingDeque();
    EmptyMarkingDeque();
  }

  // Repeat host application-specific and Harmony weak maps marking to
  // mark unmarked objects reachable from the weak roots.
  ProcessEphemeralMarking(&root_visitor);

  AfterMarking();

  if (FLAG_print_cumulative_gc_stat) {
    heap_->tracer()->AddMarkingTime(base::OS::TimeCurrentMillis() - start_time);
  }
}


void MarkCompactCollector::AfterMarking() {
  // Object literal map caches reference strings (cache keys) and maps
  // (cache values). At this point, still-useful maps have already been
  // marked. Mark the keys for the alive values before we process the
  // string table.
  ProcessMapCaches();

  // Prune the string table, removing all strings only pointed to by the
  // string table. Cannot use string_table() here because the string
  // table is marked.
  StringTable* string_table = heap()->string_table();
  InternalizedStringTableCleaner internalized_visitor(heap());
  string_table->IterateElements(&internalized_visitor);
  string_table->ElementsRemoved(internalized_visitor.PointersRemoved());

  ExternalStringTableCleaner external_visitor(heap());
  heap()->external_string_table_.Iterate(&external_visitor);
  heap()->external_string_table_.CleanUp();

  // Process the weak references.
  MarkCompactWeakObjectRetainer mark_compact_object_retainer;
  heap()->ProcessWeakReferences(&mark_compact_object_retainer);

  // Remove object groups after the marking phase.
  heap()->isolate()->global_handles()->RemoveObjectGroups();
  heap()->isolate()->global_handles()->RemoveImplicitRefGroups();

  // Flush code from collected candidates.
  if (is_code_flushing_enabled()) {
    code_flusher_->ProcessCandidates();
    // If the incremental marker does not support code flushing, we need to
    // disable it before incremental marking steps for the next cycle.
    if (FLAG_flush_code && !FLAG_flush_code_incrementally) {
      EnableCodeFlushing(false);
    }
  }

  if (FLAG_track_gc_object_stats) {
    heap()->CheckpointObjectStats();
  }
}


void MarkCompactCollector::ProcessMapCaches() {
  Object* raw_context = heap()->native_contexts_list();
  while (raw_context != heap()->undefined_value()) {
    Context* context = reinterpret_cast<Context*>(raw_context);
    if (IsMarked(context)) {
      HeapObject* raw_map_cache =
          HeapObject::cast(context->get(Context::MAP_CACHE_INDEX));
      // A map cache may be reachable from the stack. In this case
      // it's already transitively marked and it's too late to clean
      // up its parts.
      if (!IsMarked(raw_map_cache) &&
          raw_map_cache != heap()->undefined_value()) {
        MapCache* map_cache = reinterpret_cast<MapCache*>(raw_map_cache);
        int existing_elements = map_cache->NumberOfElements();
        int used_elements = 0;
        for (int i = MapCache::kElementsStartIndex;
             i < map_cache->length();
             i += MapCache::kEntrySize) {
          Object* raw_key = map_cache->get(i);
          if (raw_key == heap()->undefined_value() ||
              raw_key == heap()->the_hole_value()) continue;
          STATIC_ASSERT(MapCache::kEntrySize == 2);
          Object* raw_map = map_cache->get(i + 1);
          if (raw_map->IsHeapObject() && IsMarked(raw_map)) {
            ++used_elements;
          } else {
            // Delete useless entries with unmarked maps.
            DCHECK(raw_map->IsMap());
            map_cache->set_the_hole(i);
            map_cache->set_the_hole(i + 1);
          }
        }
        if (used_elements == 0) {
          context->set(Context::MAP_CACHE_INDEX, heap()->undefined_value());
        } else {
          // Note: we don't actually shrink the cache here to avoid
          // extra complexity during GC. We rely on subsequent cache
          // usages (EnsureCapacity) to do this.
          map_cache->ElementsRemoved(existing_elements - used_elements);
          MarkBit map_cache_markbit = Marking::MarkBitFrom(map_cache);
          MarkObject(map_cache, map_cache_markbit);
        }
      }
    }
    // Move to the next element in the list.
    raw_context = context->get(Context::NEXT_CONTEXT_LINK);
  }
  ProcessMarkingDeque();
}


void MarkCompactCollector::ClearNonLiveReferences() {
  // Iterate over the map space, setting map transitions that go from
  // a marked map to an unmarked map to null transitions. This action
  // is carried out only on maps of JSObjects and related subtypes.
  HeapObjectIterator map_iterator(heap()->map_space());
  for (HeapObject* obj = map_iterator.Next();
       obj != NULL;
       obj = map_iterator.Next()) {
    Map* map = Map::cast(obj);

    if (!map->CanTransition()) continue;

    MarkBit map_mark = Marking::MarkBitFrom(map);
    ClearNonLivePrototypeTransitions(map);
    ClearNonLiveMapTransitions(map, map_mark);

    if (map_mark.Get()) {
      ClearNonLiveDependentCode(map->dependent_code());
    } else {
      ClearDependentCode(map->dependent_code());
      map->set_dependent_code(DependentCode::cast(heap()->empty_fixed_array()));
    }
  }

  // Iterate over property cell space, removing dependent code that is not
  // otherwise kept alive by strong references.
  HeapObjectIterator cell_iterator(heap_->property_cell_space());
  for (HeapObject* cell = cell_iterator.Next();
       cell != NULL;
       cell = cell_iterator.Next()) {
    if (IsMarked(cell)) {
      ClearNonLiveDependentCode(PropertyCell::cast(cell)->dependent_code());
    }
  }

  // Iterate over allocation sites, removing dependent code that is not
  // otherwise kept alive by strong references.
  Object* undefined = heap()->undefined_value();
  for (Object* site = heap()->allocation_sites_list();
       site != undefined;
       site = AllocationSite::cast(site)->weak_next()) {
    if (IsMarked(site)) {
      ClearNonLiveDependentCode(AllocationSite::cast(site)->dependent_code());
    }
  }

  if (heap_->weak_object_to_code_table()->IsHashTable()) {
    WeakHashTable* table =
        WeakHashTable::cast(heap_->weak_object_to_code_table());
    uint32_t capacity = table->Capacity();
    for (uint32_t i = 0; i < capacity; i++) {
      uint32_t key_index = table->EntryToIndex(i);
      Object* key = table->get(key_index);
      if (!table->IsKey(key)) continue;
      uint32_t value_index = table->EntryToValueIndex(i);
      Object* value = table->get(value_index);
      if (key->IsCell() && !IsMarked(key)) {
        Cell* cell = Cell::cast(key);
        Object* object = cell->value();
        if (IsMarked(object)) {
          MarkBit mark = Marking::MarkBitFrom(cell);
          SetMark(cell, mark);
          Object** value_slot = HeapObject::RawField(cell, Cell::kValueOffset);
          RecordSlot(value_slot, value_slot, *value_slot);
        }
      }
      if (IsMarked(key)) {
        if (!IsMarked(value)) {
          HeapObject* obj = HeapObject::cast(value);
          MarkBit mark = Marking::MarkBitFrom(obj);
          SetMark(obj, mark);
        }
        ClearNonLiveDependentCode(DependentCode::cast(value));
      } else {
        ClearDependentCode(DependentCode::cast(value));
        table->set(key_index, heap_->the_hole_value());
        table->set(value_index, heap_->the_hole_value());
        table->ElementRemoved();
      }
    }
  }
}


void MarkCompactCollector::ClearNonLivePrototypeTransitions(Map* map) {
  int number_of_transitions = map->NumberOfProtoTransitions();
  FixedArray* prototype_transitions = map->GetPrototypeTransitions();

  int new_number_of_transitions = 0;
  const int header = Map::kProtoTransitionHeaderSize;
  const int proto_offset = header + Map::kProtoTransitionPrototypeOffset;
  const int map_offset = header + Map::kProtoTransitionMapOffset;
  const int step = Map::kProtoTransitionElementsPerEntry;
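  // Each transition occupies one (prototype, cached map) entry pair after
  // the header. Pairs whose prototype and cached map are both still live are
  // compacted towards the front in place; the freed tail is overwritten with
  // undefined below.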
  for (int i = 0; i < number_of_transitions; i++) {
    Object* prototype = prototype_transitions->get(proto_offset + i * step);
    Object* cached_map = prototype_transitions->get(map_offset + i * step);
    if (IsMarked(prototype) && IsMarked(cached_map)) {
      DCHECK(!prototype->IsUndefined());
      int proto_index = proto_offset + new_number_of_transitions * step;
      int map_index = map_offset + new_number_of_transitions * step;
      if (new_number_of_transitions != i) {
        prototype_transitions->set(
            proto_index,
            prototype,
            UPDATE_WRITE_BARRIER);
        prototype_transitions->set(
            map_index,
            cached_map,
            SKIP_WRITE_BARRIER);
      }
      Object** slot = prototype_transitions->RawFieldOfElementAt(proto_index);
      RecordSlot(slot, slot, prototype);
      new_number_of_transitions++;
    }
  }

  if (new_number_of_transitions != number_of_transitions) {
    map->SetNumberOfProtoTransitions(new_number_of_transitions);
  }

  // Fill slots that became free with undefined value.
  for (int i = new_number_of_transitions * step;
       i < number_of_transitions * step;
       i++) {
    prototype_transitions->set_undefined(header + i);
  }
}


void MarkCompactCollector::ClearNonLiveMapTransitions(Map* map,
                                                      MarkBit map_mark) {
  Object* potential_parent = map->GetBackPointer();
  if (!potential_parent->IsMap()) return;
  Map* parent = Map::cast(potential_parent);

  // Follow the back pointer to check whether we are dealing with a map
  // transition from a live map to a dead path; if so, clear the parent's
  // non-live transitions.
  bool current_is_alive = map_mark.Get();
  bool parent_is_alive = Marking::MarkBitFrom(parent).Get();
  if (!current_is_alive && parent_is_alive) {
    parent->ClearNonLiveTransitions(heap());
  }
}


void MarkCompactCollector::ClearDependentICList(Object* head) {
  Object* current = head;
  Object* undefined = heap()->undefined_value();
  while (current != undefined) {
    Code* code = Code::cast(current);
    if (IsMarked(code)) {
      DCHECK(code->is_weak_stub());
      IC::InvalidateMaps(code);
    }
    current = code->next_code_link();
    code->set_next_code_link(undefined);
  }
}


void MarkCompactCollector::ClearDependentCode(DependentCode* entries) {
  DisallowHeapAllocation no_allocation;
  DependentCode::GroupStartIndexes starts(entries);
  int number_of_entries = starts.number_of_entries();
  if (number_of_entries == 0) return;
  int g = DependentCode::kWeakICGroup;
  if (starts.at(g) != starts.at(g + 1)) {
    int i = starts.at(g);
    DCHECK(i + 1 == starts.at(g + 1));
    Object* head = entries->object_at(i);
    ClearDependentICList(head);
  }
  g = DependentCode::kWeakCodeGroup;
  for (int i = starts.at(g); i < starts.at(g + 1); i++) {
    // If the entry is compilation info then the map must be alive,
    // and ClearDependentCode shouldn't be called.
    DCHECK(entries->is_code_at(i));
    Code* code = entries->code_at(i);
    if (IsMarked(code) && !code->marked_for_deoptimization()) {
      code->set_marked_for_deoptimization(true);
      code->InvalidateEmbeddedObjects();
      have_code_to_deoptimize_ = true;
    }
  }
  for (int i = 0; i < number_of_entries; i++) {
    entries->clear_at(i);
  }
}


int MarkCompactCollector::ClearNonLiveDependentCodeInGroup(
    DependentCode* entries, int group, int start, int end, int new_start) {
  int survived = 0;
  if (group == DependentCode::kWeakICGroup) {
    // Dependent weak IC stubs form a linked list and only the head is stored
    // in the dependent code array.
    if (start != end) {
      DCHECK(start + 1 == end);
      Object* old_head = entries->object_at(start);
      MarkCompactWeakObjectRetainer retainer;
      Object* head = VisitWeakList<Code>(heap(), old_head, &retainer);
      entries->set_object_at(new_start, head);
      Object** slot = entries->slot_at(new_start);
      RecordSlot(slot, slot, head);
      // We do not compact this group even if the head is undefined;
      // more dependent ICs are likely to be added later.
      survived = 1;
    }
  } else {
    for (int i = start; i < end; i++) {
      Object* obj = entries->object_at(i);
      DCHECK(obj->IsCode() || IsMarked(obj));
      if (IsMarked(obj) &&
          (!obj->IsCode() || !WillBeDeoptimized(Code::cast(obj)))) {
        if (new_start + survived != i) {
          entries->set_object_at(new_start + survived, obj);
        }
        Object** slot = entries->slot_at(new_start + survived);
        RecordSlot(slot, slot, obj);
        survived++;
      }
    }
  }
  entries->set_number_of_entries(
      static_cast<DependentCode::DependencyGroup>(group), survived);
  return survived;
}


void MarkCompactCollector::ClearNonLiveDependentCode(DependentCode* entries) {
  DisallowHeapAllocation no_allocation;
  DependentCode::GroupStartIndexes starts(entries);
  int number_of_entries = starts.number_of_entries();
  if (number_of_entries == 0) return;
  int new_number_of_entries = 0;
  // Go through all groups, remove dead code and compact.
  for (int g = 0; g < DependentCode::kGroupCount; g++) {
    int survived = ClearNonLiveDependentCodeInGroup(
        entries, g, starts.at(g), starts.at(g + 1), new_number_of_entries);
    new_number_of_entries += survived;
  }
  for (int i = new_number_of_entries; i < number_of_entries; i++) {
    entries->clear_at(i);
  }
}

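// For every weak collection encountered during marking, mark the value of
// each entry whose key is already marked (ephemeron semantics) and record
// the entry's slots so they can be updated if the backing table moves
// during compaction.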
void MarkCompactCollector::ProcessWeakCollections() {
  GCTracer::Scope gc_scope(heap()->tracer(),
                           GCTracer::Scope::MC_WEAKCOLLECTION_PROCESS);
  Object* weak_collection_obj = heap()->encountered_weak_collections();
  while (weak_collection_obj != Smi::FromInt(0)) {
    JSWeakCollection* weak_collection =
        reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
    DCHECK(MarkCompactCollector::IsMarked(weak_collection));
    if (weak_collection->table()->IsHashTable()) {
      ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
      Object** anchor = reinterpret_cast<Object**>(table->address());
      for (int i = 0; i < table->Capacity(); i++) {
        if (MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
          Object** key_slot =
              table->RawFieldOfElementAt(ObjectHashTable::EntryToIndex(i));
          RecordSlot(anchor, key_slot, *key_slot);
          Object** value_slot =
              table->RawFieldOfElementAt(ObjectHashTable::EntryToValueIndex(i));
          MarkCompactMarkingVisitor::MarkObjectByPointer(
              this, anchor, value_slot);
        }
      }
    }
    weak_collection_obj = weak_collection->next();
  }
}


void MarkCompactCollector::ClearWeakCollections() {
  GCTracer::Scope gc_scope(heap()->tracer(),
                           GCTracer::Scope::MC_WEAKCOLLECTION_CLEAR);
  Object* weak_collection_obj = heap()->encountered_weak_collections();
  while (weak_collection_obj != Smi::FromInt(0)) {
    JSWeakCollection* weak_collection =
        reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
    DCHECK(MarkCompactCollector::IsMarked(weak_collection));
    if (weak_collection->table()->IsHashTable()) {
      ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
      for (int i = 0; i < table->Capacity(); i++) {
        HeapObject* key = HeapObject::cast(table->KeyAt(i));
        if (!MarkCompactCollector::IsMarked(key)) {
          table->RemoveEntry(i);
        }
      }
    }
    weak_collection_obj = weak_collection->next();
    weak_collection->set_next(heap()->undefined_value());
  }
  heap()->set_encountered_weak_collections(Smi::FromInt(0));
}

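// Record a slot in a just-migrated object: if the value lives in new space
// the slot goes into the store buffer, and if the value sits on an
// evacuation candidate the slot goes into the migration slots buffer so it
// can be updated once the value has moved.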
2803 void MarkCompactCollector::RecordMigratedSlot(Object* value, Address slot) { | |
2804 if (heap_->InNewSpace(value)) { | |
2805 heap_->store_buffer()->Mark(slot); | |
2806 } else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) { | |
2807 SlotsBuffer::AddTo(&slots_buffer_allocator_, | |
2808 &migration_slots_buffer_, | |
2809 reinterpret_cast<Object**>(slot), | |
2810 SlotsBuffer::IGNORE_OVERFLOW); | |
2811 } | |
2812 } | |
2813 | |
2814 | |
2815 | |
2816 // We scavange new space simultaneously with sweeping. This is done in two | |
2817 // passes. | |
2818 // | |
2819 // The first pass migrates all alive objects from one semispace to another or | |
2820 // promotes them to old space. Forwarding address is written directly into | |
2821 // first word of object without any encoding. If object is dead we write | |
2822 // NULL as a forwarding address. | |
2823 // | |
2824 // The second pass updates pointers to new space in all spaces. It is possible | |
2825 // to encounter pointers to dead new space objects during traversal of pointers | |
2826 // to new space. We should clear them to avoid encountering them during next | |
2827 // pointer iteration. This is an issue if the store buffer overflows and we | |
2828 // have to scan the entire old space, including dead objects, looking for | |
2829 // pointers to new space. | |
2830 void MarkCompactCollector::MigrateObject(HeapObject* dst, | |
2831 HeapObject* src, | |
2832 int size, | |
2833 AllocationSpace dest) { | |
2834 Address dst_addr = dst->address(); | |
2835 Address src_addr = src->address(); | |
2836 DCHECK(heap()->AllowedToBeMigrated(src, dest)); | |
2837 DCHECK(dest != LO_SPACE && size <= Page::kMaxRegularHeapObjectSize); | |
2838 if (dest == OLD_POINTER_SPACE) { | |
2839 Address src_slot = src_addr; | |
2840 Address dst_slot = dst_addr; | |
2841 DCHECK(IsAligned(size, kPointerSize)); | |
2842 | |
2843 for (int remaining = size / kPointerSize; remaining > 0; remaining--) { | |
2844 Object* value = Memory::Object_at(src_slot); | |
2845 | |
2846 Memory::Object_at(dst_slot) = value; | |
2847 | |
2848 // We special case ConstantPoolArrays below since they could contain | |
2849 // integers value entries which look like tagged pointers. | |
2850 // TODO(mstarzinger): restructure this code to avoid this special-casing. | |
2851 if (!src->IsConstantPoolArray()) { | |
2852 RecordMigratedSlot(value, dst_slot); | |
2853 } | |
2854 | |
2855 src_slot += kPointerSize; | |
2856 dst_slot += kPointerSize; | |
2857 } | |
2858 | |
2859 if (compacting_ && dst->IsJSFunction()) { | |
2860 Address code_entry_slot = dst_addr + JSFunction::kCodeEntryOffset; | |
2861 Address code_entry = Memory::Address_at(code_entry_slot); | |
2862 | |
2863 if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) { | |
2864 SlotsBuffer::AddTo(&slots_buffer_allocator_, | |
2865 &migration_slots_buffer_, | |
2866 SlotsBuffer::CODE_ENTRY_SLOT, | |
2867 code_entry_slot, | |
2868 SlotsBuffer::IGNORE_OVERFLOW); | |
2869 } | |
2870 } else if (dst->IsConstantPoolArray()) { | |
2871 ConstantPoolArray* array = ConstantPoolArray::cast(dst); | |
2872 ConstantPoolArray::Iterator code_iter(array, ConstantPoolArray::CODE_PTR); | |
2873 while (!code_iter.is_finished()) { | |
2874 Address code_entry_slot = | |
2875 dst_addr + array->OffsetOfElementAt(code_iter.next_index()); | |
2876 Address code_entry = Memory::Address_at(code_entry_slot); | |
2877 | |
2878 if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) { | |
2879 SlotsBuffer::AddTo(&slots_buffer_allocator_, | |
2880 &migration_slots_buffer_, | |
2881 SlotsBuffer::CODE_ENTRY_SLOT, | |
2882 code_entry_slot, | |
2883 SlotsBuffer::IGNORE_OVERFLOW); | |
2884 } | |
2885 } | |
2886 ConstantPoolArray::Iterator heap_iter(array, ConstantPoolArray::HEAP_PTR); | |
2887 while (!heap_iter.is_finished()) { | |
2888 Address heap_slot = | |
2889 dst_addr + array->OffsetOfElementAt(heap_iter.next_index()); | |
2890 Object* value = Memory::Object_at(heap_slot); | |
2891 RecordMigratedSlot(value, heap_slot); | |
2892 } | |
2893 } | |
2894 } else if (dest == CODE_SPACE) { | |
2895 PROFILE(isolate(), CodeMoveEvent(src_addr, dst_addr)); | |
2896 heap()->MoveBlock(dst_addr, src_addr, size); | |
2897 SlotsBuffer::AddTo(&slots_buffer_allocator_, | |
2898 &migration_slots_buffer_, | |
2899 SlotsBuffer::RELOCATED_CODE_OBJECT, | |
2900 dst_addr, | |
2901 SlotsBuffer::IGNORE_OVERFLOW); | |
2902 Code::cast(dst)->Relocate(dst_addr - src_addr); | |
2903 } else { | |
2904 DCHECK(dest == OLD_DATA_SPACE || dest == NEW_SPACE); | |
2905 heap()->MoveBlock(dst_addr, src_addr, size); | |
2906 } | |
2907 heap()->OnMoveEvent(dst, src, size); | |
2908 Memory::Address_at(src_addr) = dst_addr; | |
2909 } | |
2910 | |
2911 | |
2912 // Visitor for updating pointers from live objects in old spaces to new space. | |
2913 // It does not expect to encounter pointers to dead objects. | |
2914 class PointersUpdatingVisitor: public ObjectVisitor { | |
2915 public: | |
2916 explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) { } | |
2917 | |
2918 void VisitPointer(Object** p) { | |
2919 UpdatePointer(p); | |
2920 } | |
2921 | |
2922 void VisitPointers(Object** start, Object** end) { | |
2923 for (Object** p = start; p < end; p++) UpdatePointer(p); | |
2924 } | |
2925 | |
2926 void VisitEmbeddedPointer(RelocInfo* rinfo) { | |
2927 DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT); | |
2928 Object* target = rinfo->target_object(); | |
2929 Object* old_target = target; | |
2930 VisitPointer(&target); | |
2931 // Avoid unnecessary changes that might unnecessary flush the instruction | |
2932 // cache. | |
2933 if (target != old_target) { | |
2934 rinfo->set_target_object(target); | |
2935 } | |
2936 } | |
2937 | |
2938 void VisitCodeTarget(RelocInfo* rinfo) { | |
2939 DCHECK(RelocInfo::IsCodeTarget(rinfo->rmode())); | |
2940 Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address()); | |
2941 Object* old_target = target; | |
2942 VisitPointer(&target); | |
2943 if (target != old_target) { | |
2944 rinfo->set_target_address(Code::cast(target)->instruction_start()); | |
2945 } | |
2946 } | |
2947 | |
2948 void VisitCodeAgeSequence(RelocInfo* rinfo) { | |
2949 DCHECK(RelocInfo::IsCodeAgeSequence(rinfo->rmode())); | |
2950 Object* stub = rinfo->code_age_stub(); | |
2951 DCHECK(stub != NULL); | |
2952 VisitPointer(&stub); | |
2953 if (stub != rinfo->code_age_stub()) { | |
2954 rinfo->set_code_age_stub(Code::cast(stub)); | |
2955 } | |
2956 } | |
2957 | |
2958 void VisitDebugTarget(RelocInfo* rinfo) { | |
2959 DCHECK((RelocInfo::IsJSReturn(rinfo->rmode()) && | |
2960 rinfo->IsPatchedReturnSequence()) || | |
2961 (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) && | |
2962 rinfo->IsPatchedDebugBreakSlotSequence())); | |
2963 Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address()); | |
2964 VisitPointer(&target); | |
2965 rinfo->set_call_address(Code::cast(target)->instruction_start()); | |
2966 } | |
2967 | |
2968 static inline void UpdateSlot(Heap* heap, Object** slot) { | |
2969 Object* obj = *slot; | |
2970 | |
2971 if (!obj->IsHeapObject()) return; | |
2972 | |
2973 HeapObject* heap_obj = HeapObject::cast(obj); | |
2974 | |
2975 MapWord map_word = heap_obj->map_word(); | |
2976 if (map_word.IsForwardingAddress()) { | |
2977 DCHECK(heap->InFromSpace(heap_obj) || | |
2978 MarkCompactCollector::IsOnEvacuationCandidate(heap_obj)); | |
2979 HeapObject* target = map_word.ToForwardingAddress(); | |
2980 *slot = target; | |
2981 DCHECK(!heap->InFromSpace(target) && | |
2982 !MarkCompactCollector::IsOnEvacuationCandidate(target)); | |
2983 } | |
2984 } | |
2985 | |
2986 private: | |
2987 inline void UpdatePointer(Object** p) { | |
2988 UpdateSlot(heap_, p); | |
2989 } | |
2990 | |
2991 Heap* heap_; | |
2992 }; | |
2993 | |
2994 | |
2995 static void UpdatePointer(HeapObject** address, HeapObject* object) { | |
2996 Address new_addr = Memory::Address_at(object->address()); | |
2997 | |
2998 // The new space sweep will overwrite the map word of dead objects | |
2999 // with NULL. In this case we do not need to transfer this entry to | |
3000 // the store buffer which we are rebuilding. | |
3001 // We perform the pointer update with a no-barrier compare-and-swap. The | |
3002 // compare-and-swap may fail in the case where the pointer update tries to | |
3003 // update garbage memory which was concurrently accessed by the sweeper. | |
3004 if (new_addr != NULL) { | |
3005 base::NoBarrier_CompareAndSwap( | |
3006 reinterpret_cast<base::AtomicWord*>(address), | |
3007 reinterpret_cast<base::AtomicWord>(object), | |
3008 reinterpret_cast<base::AtomicWord>(HeapObject::FromAddress(new_addr))); | |
3009 } else { | |
3010 // We have to zap this pointer, because the store buffer may overflow | |
3011 // later, and then we would have to scan the entire heap. We don't want | |
3012 // to find spurious new-space pointers in the old space. | |
3013 // TODO(mstarzinger): This was changed to a sentinel value to track down | |
3014 // rare crashes, change it back to Smi::FromInt(0) later. | |
3015 base::NoBarrier_CompareAndSwap( | |
3016 reinterpret_cast<base::AtomicWord*>(address), | |
3017 reinterpret_cast<base::AtomicWord>(object), | |
3018 reinterpret_cast<base::AtomicWord>(Smi::FromInt(0x0f100d00 >> 1))); | |
3019 } | |
3020 } | |
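
// A minimal sketch (not part of the collector) of the CAS discipline used
// above: update the slot only if it still holds the value we read earlier,
// so a concurrent writer (e.g. the sweeper) silently wins. The helper name
// SlotCasUpdate is hypothetical.
static inline void SlotCasUpdate(HeapObject** slot, HeapObject* expected_old,
                                 HeapObject* new_value) {
  // If the compare fails, the slot was changed concurrently and is left
  // untouched on purpose.
  base::NoBarrier_CompareAndSwap(
      reinterpret_cast<base::AtomicWord*>(slot),
      reinterpret_cast<base::AtomicWord>(expected_old),
      reinterpret_cast<base::AtomicWord>(new_value));
}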
3021 | |
3022 | |
3023 static String* UpdateReferenceInExternalStringTableEntry(Heap* heap, | |
3024 Object** p) { | |
3025 MapWord map_word = HeapObject::cast(*p)->map_word(); | |
3026 | |
3027 if (map_word.IsForwardingAddress()) { | |
3028 return String::cast(map_word.ToForwardingAddress()); | |
3029 } | |
3030 | |
3031 return String::cast(*p); | |
3032 } | |
3033 | |
3034 | |
3035 bool MarkCompactCollector::TryPromoteObject(HeapObject* object, | |
3036 int object_size) { | |
3037 DCHECK(object_size <= Page::kMaxRegularHeapObjectSize); | |
3038 | |
3039 OldSpace* target_space = heap()->TargetSpace(object); | |
3040 | |
3041 DCHECK(target_space == heap()->old_pointer_space() || | |
3042 target_space == heap()->old_data_space()); | |
3043 HeapObject* target; | |
3044 AllocationResult allocation = target_space->AllocateRaw(object_size); | |
3045 if (allocation.To(&target)) { | |
3046 MigrateObject(target, | |
3047 object, | |
3048 object_size, | |
3049 target_space->identity()); | |
3050 heap()->IncrementPromotedObjectsSize(object_size); | |
3051 return true; | |
3052 } | |
3053 | |
3054 return false; | |
3055 } | |
3056 | |
3057 | |
3058 void MarkCompactCollector::EvacuateNewSpace() { | |
3059 // There are soft limits in the allocation code, designed to trigger a | |
3060 // mark-sweep collection by failing allocations. But since we are already | |
3061 // in a mark-sweep allocation, there is no sense in trying to trigger one. | |
3062 AlwaysAllocateScope scope(isolate()); | |
3063 | |
3064 NewSpace* new_space = heap()->new_space(); | |
3065 | |
3066 // Store allocation range before flipping semispaces. | |
3067 Address from_bottom = new_space->bottom(); | |
3068 Address from_top = new_space->top(); | |
3069 | |
3070 // Flip the semispaces. After flipping, to space is empty and from space | |
3071 // holds the live objects. | |
3072 new_space->Flip(); | |
3073 new_space->ResetAllocationInfo(); | |
3074 | |
3075 int survivors_size = 0; | |
3076 | |
3077 // First pass: traverse all objects in inactive semispace, remove marks, | |
3078 // migrate live objects and write forwarding addresses. This stage puts | |
3079 // new entries in the store buffer and may cause some pages to be marked | |
3080 // scan-on-scavenge. | |
3081 NewSpacePageIterator it(from_bottom, from_top); | |
3082 while (it.has_next()) { | |
3083 NewSpacePage* p = it.next(); | |
3084 survivors_size += DiscoverAndEvacuateBlackObjectsOnPage(new_space, p); | |
3085 } | |
3086 | |
3087 heap_->IncrementYoungSurvivorsCounter(survivors_size); | |
3088 new_space->set_age_mark(new_space->top()); | |
3089 } | |
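
// Timeline of the evacuation above, as a sketch: before Flip() the live
// objects sit in the current to-space, where allocation happened; Flip()
// swaps the semispace roles so those pages become from-space, allocation
// restarts in the now-empty to-space, and
// DiscoverAndEvacuateBlackObjectsOnPage() then copies the marked survivors
// out of from-space, leaving forwarding addresses behind.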
3090 | |
3091 | |
3092 void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) { | |
3093 AlwaysAllocateScope always_allocate(isolate()); | |
3094 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); | |
3095 DCHECK(p->IsEvacuationCandidate() && !p->WasSwept()); | |
3096 p->MarkSweptPrecisely(); | |
3097 | |
3098 int offsets[16]; | |
3099 | |
3100 for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) { | |
3101 Address cell_base = it.CurrentCellBase(); | |
3102 MarkBit::CellType* cell = it.CurrentCell(); | |
3103 | |
3104 if (*cell == 0) continue; | |
3105 | |
3106 int live_objects = MarkWordToObjectStarts(*cell, offsets); | |
3107 for (int i = 0; i < live_objects; i++) { | |
3108 Address object_addr = cell_base + offsets[i] * kPointerSize; | |
3109 HeapObject* object = HeapObject::FromAddress(object_addr); | |
3110 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object))); | |
3111 | |
3112 int size = object->Size(); | |
3113 | |
3114 HeapObject* target_object; | |
3115 AllocationResult allocation = space->AllocateRaw(size); | |
3116 if (!allocation.To(&target_object)) { | |
3117 // If allocation failed, use emergency memory and re-try allocation. | |
3118 CHECK(space->HasEmergencyMemory()); | |
3119 space->UseEmergencyMemory(); | |
3120 allocation = space->AllocateRaw(size); | |
3121 } | |
3122 if (!allocation.To(&target_object)) { | |
3123 // OS refused to give us memory. | |
3124 V8::FatalProcessOutOfMemory("Evacuation"); | |
3125 return; | |
3126 } | |
3127 | |
3128 MigrateObject(target_object, object, size, space->identity()); | |
3129 DCHECK(object->map_word().IsForwardingAddress()); | |
3130 } | |
3131 | |
3132 // Clear marking bits for current cell. | |
3133 *cell = 0; | |
3134 } | |
3135 p->ResetLiveBytes(); | |
3136 } | |
3137 | |
3138 | |
3139 void MarkCompactCollector::EvacuatePages() { | |
3140 int npages = evacuation_candidates_.length(); | |
3141 for (int i = 0; i < npages; i++) { | |
3142 Page* p = evacuation_candidates_[i]; | |
3143 DCHECK(p->IsEvacuationCandidate() || | |
3144 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); | |
3145 DCHECK(static_cast<int>(p->parallel_sweeping()) == | |
3146 MemoryChunk::SWEEPING_DONE); | |
3147 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); | |
3148 // Allocate emergency memory for the case when compaction fails due to out | |
3149 // of memory. | |
3150 if (!space->HasEmergencyMemory()) { | |
3151 space->CreateEmergencyMemory(); | |
3152 } | |
3153 if (p->IsEvacuationCandidate()) { | |
3154 // During compaction we might have to request a new page. Check that we | |
3155 // have emergency memory available and that the space can still expand. | |
3156 if (space->HasEmergencyMemory() && space->CanExpand()) { | |
3157 EvacuateLiveObjectsFromPage(p); | |
3158 } else { | |
3159 // Without room for expansion, evacuation is not guaranteed to succeed. | |
3160 // Pessimistically abandon the unevacuated pages. | |
3161 for (int j = i; j < npages; j++) { | |
3162 Page* page = evacuation_candidates_[j]; | |
3163 slots_buffer_allocator_.DeallocateChain(page->slots_buffer_address()); | |
3164 page->ClearEvacuationCandidate(); | |
3165 page->SetFlag(Page::RESCAN_ON_EVACUATION); | |
3166 } | |
3167 break; | |
3168 } | |
3169 } | |
3170 } | |
3171 if (npages > 0) { | |
3172 // Release emergency memory. | |
3173 PagedSpaces spaces(heap()); | |
3174 for (PagedSpace* space = spaces.next(); space != NULL; | |
3175 space = spaces.next()) { | |
3176 if (space->HasEmergencyMemory()) { | |
3177 space->FreeEmergencyMemory(); | |
3178 } | |
3179 } | |
3180 } | |
3181 } | |
3182 | |
3183 | |
3184 class EvacuationWeakObjectRetainer : public WeakObjectRetainer { | |
3185 public: | |
3186 virtual Object* RetainAs(Object* object) { | |
3187 if (object->IsHeapObject()) { | |
3188 HeapObject* heap_object = HeapObject::cast(object); | |
3189 MapWord map_word = heap_object->map_word(); | |
3190 if (map_word.IsForwardingAddress()) { | |
3191 return map_word.ToForwardingAddress(); | |
3192 } | |
3193 } | |
3194 return object; | |
3195 } | |
3196 }; | |
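
// The map-word forwarding idiom above recurs throughout this file (see also
// UpdateReferenceInExternalStringTableEntry and PointersUpdatingVisitor).
// A minimal sketch of the shared pattern, with a hypothetical helper name:
static inline HeapObject* FollowForwardingAddress(HeapObject* object) {
  MapWord map_word = object->map_word();
  // Migrated objects have their map word overwritten with the new location;
  // objects that were not moved still hold a real map and are returned
  // unchanged.
  if (map_word.IsForwardingAddress()) return map_word.ToForwardingAddress();
  return object;
}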
3197 | |
3198 | |
3199 static inline void UpdateSlot(Isolate* isolate, | |
3200 ObjectVisitor* v, | |
3201 SlotsBuffer::SlotType slot_type, | |
3202 Address addr) { | |
3203 switch (slot_type) { | |
3204 case SlotsBuffer::CODE_TARGET_SLOT: { | |
3205 RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, 0, NULL); | |
3206 rinfo.Visit(isolate, v); | |
3207 break; | |
3208 } | |
3209 case SlotsBuffer::CODE_ENTRY_SLOT: { | |
3210 v->VisitCodeEntry(addr); | |
3211 break; | |
3212 } | |
3213 case SlotsBuffer::RELOCATED_CODE_OBJECT: { | |
3214 HeapObject* obj = HeapObject::FromAddress(addr); | |
3215 Code::cast(obj)->CodeIterateBody(v); | |
3216 break; | |
3217 } | |
3218 case SlotsBuffer::DEBUG_TARGET_SLOT: { | |
3219 RelocInfo rinfo(addr, RelocInfo::DEBUG_BREAK_SLOT, 0, NULL); | |
3220 if (rinfo.IsPatchedDebugBreakSlotSequence()) rinfo.Visit(isolate, v); | |
3221 break; | |
3222 } | |
3223 case SlotsBuffer::JS_RETURN_SLOT: { | |
3224 RelocInfo rinfo(addr, RelocInfo::JS_RETURN, 0, NULL); | |
3225 if (rinfo.IsPatchedReturnSequence()) rinfo.Visit(isolate, v); | |
3226 break; | |
3227 } | |
3228 case SlotsBuffer::EMBEDDED_OBJECT_SLOT: { | |
3229 RelocInfo rinfo(addr, RelocInfo::EMBEDDED_OBJECT, 0, NULL); | |
3230 rinfo.Visit(isolate, v); | |
3231 break; | |
3232 } | |
3233 default: | |
3234 UNREACHABLE(); | |
3235 break; | |
3236 } | |
3237 } | |
3238 | |
3239 | |
3240 enum SweepingMode { | |
3241 SWEEP_ONLY, | |
3242 SWEEP_AND_VISIT_LIVE_OBJECTS | |
3243 }; | |
3244 | |
3245 | |
3246 enum SkipListRebuildingMode { | |
3247 REBUILD_SKIP_LIST, | |
3248 IGNORE_SKIP_LIST | |
3249 }; | |
3250 | |
3251 | |
3252 enum FreeSpaceTreatmentMode { | |
3253 IGNORE_FREE_SPACE, | |
3254 ZAP_FREE_SPACE | |
3255 }; | |
3256 | |
3257 | |
3258 template<MarkCompactCollector::SweepingParallelism mode> | |
3259 static intptr_t Free(PagedSpace* space, | |
3260 FreeList* free_list, | |
3261 Address start, | |
3262 int size) { | |
3263 if (mode == MarkCompactCollector::SWEEP_ON_MAIN_THREAD) { | |
3264 DCHECK(free_list == NULL); | |
3265 return space->Free(start, size); | |
3266 } else { | |
3267 // TODO(hpayer): account for wasted bytes in concurrent sweeping too. | |
3268 return size - free_list->Free(start, size); | |
3269 } | |
3270 } | |
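
// Illustrative call sites for the helper above (a sketch): the main thread
// returns memory directly to the space, while a parallel sweeper fills a
// thread-private free list that is concatenated into the space's list later
// (see SweepInParallel):
//   Free<MarkCompactCollector::SWEEP_ON_MAIN_THREAD>(space, NULL, start, size);
//   Free<MarkCompactCollector::SWEEP_IN_PARALLEL>(
//       space, &private_free_list, start, size);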
3271 | |
3272 | |
3273 // Sweep a space precisely. After this has been done the space can | |
3274 // be iterated precisely, hitting only the live objects. Code space | |
3275 // is always swept precisely because we want to be able to iterate | |
3276 // over it. Map space is swept precisely, because it is not compacted. | |
3277 // Slots in live objects pointing into evacuation candidates are updated | |
3278 // if requested. | |
3279 // Returns the size of the biggest contiguous freed memory chunk in bytes. | |
3280 template<SweepingMode sweeping_mode, | |
3281 MarkCompactCollector::SweepingParallelism parallelism, | |
3282 SkipListRebuildingMode skip_list_mode, | |
3283 FreeSpaceTreatmentMode free_space_mode> | |
3284 static int SweepPrecisely(PagedSpace* space, | |
3285 FreeList* free_list, | |
3286 Page* p, | |
3287 ObjectVisitor* v) { | |
3288 DCHECK(!p->IsEvacuationCandidate() && !p->WasSwept()); | |
3289 DCHECK_EQ(skip_list_mode == REBUILD_SKIP_LIST, | |
3290 space->identity() == CODE_SPACE); | |
3291 DCHECK((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST)); | |
3292 DCHECK(parallelism == MarkCompactCollector::SWEEP_ON_MAIN_THREAD || | |
3293 sweeping_mode == SWEEP_ONLY); | |
3294 | |
3295 Address free_start = p->area_start(); | |
3296 DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0); | |
3297 int offsets[16]; | |
3298 | |
3299 SkipList* skip_list = p->skip_list(); | |
3300 int curr_region = -1; | |
3301 if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list) { | |
3302 skip_list->Clear(); | |
3303 } | |
3304 | |
3305 intptr_t freed_bytes = 0; | |
3306 intptr_t max_freed_bytes = 0; | |
3307 | |
3308 for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) { | |
3309 Address cell_base = it.CurrentCellBase(); | |
3310 MarkBit::CellType* cell = it.CurrentCell(); | |
3311 int live_objects = MarkWordToObjectStarts(*cell, offsets); | |
3312 int live_index = 0; | |
3313 for ( ; live_objects != 0; live_objects--) { | |
3314 Address free_end = cell_base + offsets[live_index++] * kPointerSize; | |
3315 if (free_end != free_start) { | |
3316 int size = static_cast<int>(free_end - free_start); | |
3317 if (free_space_mode == ZAP_FREE_SPACE) { | |
3318 memset(free_start, 0xcc, size); | |
3319 } | |
3320 freed_bytes = Free<parallelism>(space, free_list, free_start, size); | |
3321 max_freed_bytes = Max(freed_bytes, max_freed_bytes); | |
3322 #ifdef ENABLE_GDB_JIT_INTERFACE | |
3323 if (FLAG_gdbjit && space->identity() == CODE_SPACE) { | |
3324 GDBJITInterface::RemoveCodeRange(free_start, free_end); | |
3325 } | |
3326 #endif | |
3327 } | |
3328 HeapObject* live_object = HeapObject::FromAddress(free_end); | |
3329 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(live_object))); | |
3330 Map* map = live_object->map(); | |
3331 int size = live_object->SizeFromMap(map); | |
3332 if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) { | |
3333 live_object->IterateBody(map->instance_type(), size, v); | |
3334 } | |
3335 if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) { | |
3336 int new_region_start = | |
3337 SkipList::RegionNumber(free_end); | |
3338 int new_region_end = | |
3339 SkipList::RegionNumber(free_end + size - kPointerSize); | |
3340 if (new_region_start != curr_region || | |
3341 new_region_end != curr_region) { | |
3342 skip_list->AddObject(free_end, size); | |
3343 curr_region = new_region_end; | |
3344 } | |
3345 } | |
3346 free_start = free_end + size; | |
3347 } | |
3348 // Clear marking bits for current cell. | |
3349 *cell = 0; | |
3350 } | |
3351 if (free_start != p->area_end()) { | |
3352 int size = static_cast<int>(p->area_end() - free_start); | |
3353 if (free_space_mode == ZAP_FREE_SPACE) { | |
3354 memset(free_start, 0xcc, size); | |
3355 } | |
3356 freed_bytes = Free<parallelism>(space, free_list, free_start, size); | |
3357 max_freed_bytes = Max(freed_bytes, max_freed_bytes); | |
3358 #ifdef ENABLE_GDB_JIT_INTERFACE | |
3359 if (FLAG_gdbjit && space->identity() == CODE_SPACE) { | |
3360 GDBJITInterface::RemoveCodeRange(free_start, p->area_end()); | |
3361 } | |
3362 #endif | |
3363 } | |
3364 p->ResetLiveBytes(); | |
3365 | |
3366 if (parallelism == MarkCompactCollector::SWEEP_IN_PARALLEL) { | |
3367 // When concurrent sweeping is active, the page will be marked after | |
3368 // sweeping by the main thread. | |
3369 p->set_parallel_sweeping(MemoryChunk::SWEEPING_FINALIZE); | |
3370 } else { | |
3371 p->MarkSweptPrecisely(); | |
3372 } | |
3373 return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes)); | |
3374 } | |
3375 | |
3376 | |
3377 static bool SetMarkBitsUnderInvalidatedCode(Code* code, bool value) { | |
3378 Page* p = Page::FromAddress(code->address()); | |
3379 | |
3380 if (p->IsEvacuationCandidate() || | |
3381 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) { | |
3382 return false; | |
3383 } | |
3384 | |
3385 Address code_start = code->address(); | |
3386 Address code_end = code_start + code->Size(); | |
3387 | |
3388 uint32_t start_index = MemoryChunk::FastAddressToMarkbitIndex(code_start); | |
3389 uint32_t end_index = | |
3390 MemoryChunk::FastAddressToMarkbitIndex(code_end - kPointerSize); | |
3391 | |
3392 Bitmap* b = p->markbits(); | |
3393 | |
3394 MarkBit start_mark_bit = b->MarkBitFromIndex(start_index); | |
3395 MarkBit end_mark_bit = b->MarkBitFromIndex(end_index); | |
3396 | |
3397 MarkBit::CellType* start_cell = start_mark_bit.cell(); | |
3398 MarkBit::CellType* end_cell = end_mark_bit.cell(); | |
3399 | |
3400 if (value) { | |
3401 MarkBit::CellType start_mask = ~(start_mark_bit.mask() - 1); | |
3402 MarkBit::CellType end_mask = (end_mark_bit.mask() << 1) - 1; | |
3403 | |
3404 if (start_cell == end_cell) { | |
3405 *start_cell |= start_mask & end_mask; | |
3406 } else { | |
3407 *start_cell |= start_mask; | |
3408 for (MarkBit::CellType* cell = start_cell + 1; cell < end_cell; cell++) { | |
3409 *cell = ~0; | |
3410 } | |
3411 *end_cell |= end_mask; | |
3412 } | |
3413 } else { | |
3414 for (MarkBit::CellType* cell = start_cell ; cell <= end_cell; cell++) { | |
3415 *cell = 0; | |
3416 } | |
3417 } | |
3418 | |
3419 return true; | |
3420 } | |
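
// Worked example for the mask computation above (illustrative values): for
// a start bit with mask 0x08 and an end bit with mask 0x40 in the same cell,
//   start_mask = ~(0x08 - 1)     = ...11111000  (bit 3 and everything above),
//   end_mask   = (0x40 << 1) - 1 = 0x7f         (bit 6 and everything below),
// so start_mask & end_mask == 0x78 sets exactly bits 3..6, the mark bits
// covering the invalidated code object.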
3421 | |
3422 | |
3423 static bool IsOnInvalidatedCodeObject(Address addr) { | |
3424 // We did not record any slots in large objects, thus we can safely | |
3425 // go to the page from the slot address. | |
3426 Page* p = Page::FromAddress(addr); | |
3427 | |
3428 // First check owner's identity because old pointer and old data spaces | |
3429 // are swept lazily and might still have non-zero mark-bits on some | |
3430 // pages. | |
3431 if (p->owner()->identity() != CODE_SPACE) return false; | |
3432 | |
3433 // In code space, the only non-zero mark bits are on evacuation candidates | |
3434 // (though we record no slots on them) and under invalidated code objects. | |
3435 MarkBit mark_bit = | |
3436 p->markbits()->MarkBitFromIndex(Page::FastAddressToMarkbitIndex(addr)); | |
3437 | |
3438 return mark_bit.Get(); | |
3439 } | |
3440 | |
3441 | |
3442 void MarkCompactCollector::InvalidateCode(Code* code) { | |
3443 if (heap_->incremental_marking()->IsCompacting() && | |
3444 !ShouldSkipEvacuationSlotRecording(code)) { | |
3445 DCHECK(compacting_); | |
3446 | |
3447 // If the object is white, then no slots were recorded on it yet. | |
3448 MarkBit mark_bit = Marking::MarkBitFrom(code); | |
3449 if (Marking::IsWhite(mark_bit)) return; | |
3450 | |
3451 invalidated_code_.Add(code); | |
3452 } | |
3453 } | |
3454 | |
3455 | |
3456 // Return true if the given code is deoptimized or will be deoptimized. | |
3457 bool MarkCompactCollector::WillBeDeoptimized(Code* code) { | |
3458 return code->is_optimized_code() && code->marked_for_deoptimization(); | |
3459 } | |
3460 | |
3461 | |
3462 bool MarkCompactCollector::MarkInvalidatedCode() { | |
3463 bool code_marked = false; | |
3464 | |
3465 int length = invalidated_code_.length(); | |
3466 for (int i = 0; i < length; i++) { | |
3467 Code* code = invalidated_code_[i]; | |
3468 | |
3469 if (SetMarkBitsUnderInvalidatedCode(code, true)) { | |
3470 code_marked = true; | |
3471 } | |
3472 } | |
3473 | |
3474 return code_marked; | |
3475 } | |
3476 | |
3477 | |
3478 void MarkCompactCollector::RemoveDeadInvalidatedCode() { | |
3479 int length = invalidated_code_.length(); | |
3480 for (int i = 0; i < length; i++) { | |
3481 if (!IsMarked(invalidated_code_[i])) invalidated_code_[i] = NULL; | |
3482 } | |
3483 } | |
3484 | |
3485 | |
3486 void MarkCompactCollector::ProcessInvalidatedCode(ObjectVisitor* visitor) { | |
3487 int length = invalidated_code_.length(); | |
3488 for (int i = 0; i < length; i++) { | |
3489 Code* code = invalidated_code_[i]; | |
3490 if (code != NULL) { | |
3491 code->Iterate(visitor); | |
3492 SetMarkBitsUnderInvalidatedCode(code, false); | |
3493 } | |
3494 } | |
3495 invalidated_code_.Rewind(0); | |
3496 } | |
3497 | |
3498 | |
3499 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() { | |
3500 Heap::RelocationLock relocation_lock(heap()); | |
3501 | |
3502 bool code_slots_filtering_required; | |
3503 { GCTracer::Scope gc_scope(heap()->tracer(), | |
3504 GCTracer::Scope::MC_SWEEP_NEWSPACE); | |
3505 code_slots_filtering_required = MarkInvalidatedCode(); | |
3506 EvacuateNewSpace(); | |
3507 } | |
3508 | |
3509 { GCTracer::Scope gc_scope(heap()->tracer(), | |
3510 GCTracer::Scope::MC_EVACUATE_PAGES); | |
3511 EvacuatePages(); | |
3512 } | |
3513 | |
3514 // Second pass: find pointers to new space and update them. | |
3515 PointersUpdatingVisitor updating_visitor(heap()); | |
3516 | |
3517 { GCTracer::Scope gc_scope(heap()->tracer(), | |
3518 GCTracer::Scope::MC_UPDATE_NEW_TO_NEW_POINTERS); | |
3519 // Update pointers in to space. | |
3520 SemiSpaceIterator to_it(heap()->new_space()->bottom(), | |
3521 heap()->new_space()->top()); | |
3522 for (HeapObject* object = to_it.Next(); | |
3523 object != NULL; | |
3524 object = to_it.Next()) { | |
3525 Map* map = object->map(); | |
3526 object->IterateBody(map->instance_type(), | |
3527 object->SizeFromMap(map), | |
3528 &updating_visitor); | |
3529 } | |
3530 } | |
3531 | |
3532 { GCTracer::Scope gc_scope(heap()->tracer(), | |
3533 GCTracer::Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS); | |
3534 // Update roots. | |
3535 heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE); | |
3536 } | |
3537 | |
3538 { GCTracer::Scope gc_scope(heap()->tracer(), | |
3539 GCTracer::Scope::MC_UPDATE_OLD_TO_NEW_POINTERS); | |
3540 StoreBufferRebuildScope scope(heap_, | |
3541 heap_->store_buffer(), | |
3542 &Heap::ScavengeStoreBufferCallback); | |
3543 heap_->store_buffer()->IteratePointersToNewSpaceAndClearMaps( | |
3544 &UpdatePointer); | |
3545 } | |
3546 | |
3547 { GCTracer::Scope gc_scope(heap()->tracer(), | |
3548 GCTracer::Scope::MC_UPDATE_POINTERS_TO_EVACUATED); | |
3549 SlotsBuffer::UpdateSlotsRecordedIn(heap_, | |
3550 migration_slots_buffer_, | |
3551 code_slots_filtering_required); | |
3552 if (FLAG_trace_fragmentation) { | |
3553 PrintF(" migration slots buffer: %d\n", | |
3554 SlotsBuffer::SizeOfChain(migration_slots_buffer_)); | |
3555 } | |
3556 | |
3557 if (compacting_ && was_marked_incrementally_) { | |
3558 // It's difficult to filter out slots recorded for large objects. | |
3559 LargeObjectIterator it(heap_->lo_space()); | |
3560 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { | |
3561 // LargeObjectSpace is not swept yet thus we have to skip | |
3562 // dead objects explicitly. | |
3563 if (!IsMarked(obj)) continue; | |
3564 | |
3565 Page* p = Page::FromAddress(obj->address()); | |
3566 if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) { | |
3567 obj->Iterate(&updating_visitor); | |
3568 p->ClearFlag(Page::RESCAN_ON_EVACUATION); | |
3569 } | |
3570 } | |
3571 } | |
3572 } | |
3573 | |
3574 int npages = evacuation_candidates_.length(); | |
3575 { GCTracer::Scope gc_scope( | |
3576 heap()->tracer(), GCTracer::Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED); | |
3577 for (int i = 0; i < npages; i++) { | |
3578 Page* p = evacuation_candidates_[i]; | |
3579 DCHECK(p->IsEvacuationCandidate() || | |
3580 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); | |
3581 | |
3582 if (p->IsEvacuationCandidate()) { | |
3583 SlotsBuffer::UpdateSlotsRecordedIn(heap_, | |
3584 p->slots_buffer(), | |
3585 code_slots_filtering_required); | |
3586 if (FLAG_trace_fragmentation) { | |
3587 PrintF(" page %p slots buffer: %d\n", | |
3588 reinterpret_cast<void*>(p), | |
3589 SlotsBuffer::SizeOfChain(p->slots_buffer())); | |
3590 } | |
3591 | |
3592 // Important: the skip list should be cleared only after the roots were | |
3593 // updated, because root iteration traverses the stack and might have to | |
3594 // find code objects from a non-updated pc inside an evacuation candidate. | |
3595 SkipList* list = p->skip_list(); | |
3596 if (list != NULL) list->Clear(); | |
3597 } else { | |
3598 if (FLAG_gc_verbose) { | |
3599 PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n", | |
3600 reinterpret_cast<intptr_t>(p)); | |
3601 } | |
3602 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); | |
3603 p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION); | |
3604 | |
3605 switch (space->identity()) { | |
3606 case OLD_DATA_SPACE: | |
3607 SweepConservatively<SWEEP_ON_MAIN_THREAD>(space, NULL, p); | |
3608 break; | |
3609 case OLD_POINTER_SPACE: | |
3610 SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, | |
3611 SWEEP_ON_MAIN_THREAD, | |
3612 IGNORE_SKIP_LIST, | |
3613 IGNORE_FREE_SPACE>( | |
3614 space, NULL, p, &updating_visitor); | |
3615 break; | |
3616 case CODE_SPACE: | |
3617 if (FLAG_zap_code_space) { | |
3618 SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, | |
3619 SWEEP_ON_MAIN_THREAD, | |
3620 REBUILD_SKIP_LIST, | |
3621 ZAP_FREE_SPACE>( | |
3622 space, NULL, p, &updating_visitor); | |
3623 } else { | |
3624 SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, | |
3625 SWEEP_ON_MAIN_THREAD, | |
3626 REBUILD_SKIP_LIST, | |
3627 IGNORE_FREE_SPACE>( | |
3628 space, NULL, p, &updating_visitor); | |
3629 } | |
3630 break; | |
3631 default: | |
3632 UNREACHABLE(); | |
3633 break; | |
3634 } | |
3635 } | |
3636 } | |
3637 } | |
3638 | |
3639 GCTracer::Scope gc_scope(heap()->tracer(), | |
3640 GCTracer::Scope::MC_UPDATE_MISC_POINTERS); | |
3641 | |
3642 // Update pointers from cells. | |
3643 HeapObjectIterator cell_iterator(heap_->cell_space()); | |
3644 for (HeapObject* cell = cell_iterator.Next(); | |
3645 cell != NULL; | |
3646 cell = cell_iterator.Next()) { | |
3647 if (cell->IsCell()) { | |
3648 Cell::BodyDescriptor::IterateBody(cell, &updating_visitor); | |
3649 } | |
3650 } | |
3651 | |
3652 HeapObjectIterator js_global_property_cell_iterator( | |
3653 heap_->property_cell_space()); | |
3654 for (HeapObject* cell = js_global_property_cell_iterator.Next(); | |
3655 cell != NULL; | |
3656 cell = js_global_property_cell_iterator.Next()) { | |
3657 if (cell->IsPropertyCell()) { | |
3658 PropertyCell::BodyDescriptor::IterateBody(cell, &updating_visitor); | |
3659 } | |
3660 } | |
3661 | |
3662 heap_->string_table()->Iterate(&updating_visitor); | |
3663 updating_visitor.VisitPointer(heap_->weak_object_to_code_table_address()); | |
3664 if (heap_->weak_object_to_code_table()->IsHashTable()) { | |
3665 WeakHashTable* table = | |
3666 WeakHashTable::cast(heap_->weak_object_to_code_table()); | |
3667 table->Iterate(&updating_visitor); | |
3668 table->Rehash(heap_->isolate()->factory()->undefined_value()); | |
3669 } | |
3670 | |
3671 // Update pointers from external string table. | |
3672 heap_->UpdateReferencesInExternalStringTable( | |
3673 &UpdateReferenceInExternalStringTableEntry); | |
3674 | |
3675 EvacuationWeakObjectRetainer evacuation_object_retainer; | |
3676 heap()->ProcessWeakReferences(&evacuation_object_retainer); | |
3677 | |
3678 // Visit invalidated code (we ignored all slots on it) and clear mark-bits | |
3679 // under it. | |
3680 ProcessInvalidatedCode(&updating_visitor); | |
3681 | |
3682 heap_->isolate()->inner_pointer_to_code_cache()->Flush(); | |
3683 | |
3684 slots_buffer_allocator_.DeallocateChain(&migration_slots_buffer_); | |
3685 DCHECK(migration_slots_buffer_ == NULL); | |
3686 } | |
3687 | |
3688 | |
3689 void MarkCompactCollector::MoveEvacuationCandidatesToEndOfPagesList() { | |
3690 int npages = evacuation_candidates_.length(); | |
3691 for (int i = 0; i < npages; i++) { | |
3692 Page* p = evacuation_candidates_[i]; | |
3693 if (!p->IsEvacuationCandidate()) continue; | |
3694 p->Unlink(); | |
3695 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); | |
3696 p->InsertAfter(space->LastPage()); | |
3697 } | |
3698 } | |
3699 | |
3700 | |
3701 void MarkCompactCollector::ReleaseEvacuationCandidates() { | |
3702 int npages = evacuation_candidates_.length(); | |
3703 for (int i = 0; i < npages; i++) { | |
3704 Page* p = evacuation_candidates_[i]; | |
3705 if (!p->IsEvacuationCandidate()) continue; | |
3706 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); | |
3707 space->Free(p->area_start(), p->area_size()); | |
3708 p->set_scan_on_scavenge(false); | |
3709 slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address()); | |
3710 p->ResetLiveBytes(); | |
3711 space->ReleasePage(p); | |
3712 } | |
3713 evacuation_candidates_.Rewind(0); | |
3714 compacting_ = false; | |
3715 heap()->FreeQueuedChunks(); | |
3716 } | |
3717 | |
3718 | |
3719 static const int kStartTableEntriesPerLine = 5; | |
3720 static const int kStartTableLines = 171; | |
3721 static const int kStartTableInvalidLine = 127; | |
3722 static const int kStartTableUnusedEntry = 126; | |
3723 | |
3724 #define _ kStartTableUnusedEntry | |
3725 #define X kStartTableInvalidLine | |
3726 // Mark-bit to object start offset table. | |
3727 // | |
3728 // The line is indexed by the mark bits in a byte. The first number on | |
3729 // the line describes the number of live object starts for the line and the | |
3730 // other numbers on the line describe the offsets (in words) of the object | |
3731 // starts. | |
3732 // | |
3733 // Since objects are at least 2 words in size, we don't have entries for two | |
3734 // consecutive 1 bits. All entries after 170 have at least 2 consecutive bits. | |
3735 char kStartTable[kStartTableLines * kStartTableEntriesPerLine] = { | |
3736 0, _, _, _, _, // 0 | |
3737 1, 0, _, _, _, // 1 | |
3738 1, 1, _, _, _, // 2 | |
3739 X, _, _, _, _, // 3 | |
3740 1, 2, _, _, _, // 4 | |
3741 2, 0, 2, _, _, // 5 | |
3742 X, _, _, _, _, // 6 | |
3743 X, _, _, _, _, // 7 | |
3744 1, 3, _, _, _, // 8 | |
3745 2, 0, 3, _, _, // 9 | |
3746 2, 1, 3, _, _, // 10 | |
3747 X, _, _, _, _, // 11 | |
3748 X, _, _, _, _, // 12 | |
3749 X, _, _, _, _, // 13 | |
3750 X, _, _, _, _, // 14 | |
3751 X, _, _, _, _, // 15 | |
3752 1, 4, _, _, _, // 16 | |
3753 2, 0, 4, _, _, // 17 | |
3754 2, 1, 4, _, _, // 18 | |
3755 X, _, _, _, _, // 19 | |
3756 2, 2, 4, _, _, // 20 | |
3757 3, 0, 2, 4, _, // 21 | |
3758 X, _, _, _, _, // 22 | |
3759 X, _, _, _, _, // 23 | |
3760 X, _, _, _, _, // 24 | |
3761 X, _, _, _, _, // 25 | |
3762 X, _, _, _, _, // 26 | |
3763 X, _, _, _, _, // 27 | |
3764 X, _, _, _, _, // 28 | |
3765 X, _, _, _, _, // 29 | |
3766 X, _, _, _, _, // 30 | |
3767 X, _, _, _, _, // 31 | |
3768 1, 5, _, _, _, // 32 | |
3769 2, 0, 5, _, _, // 33 | |
3770 2, 1, 5, _, _, // 34 | |
3771 X, _, _, _, _, // 35 | |
3772 2, 2, 5, _, _, // 36 | |
3773 3, 0, 2, 5, _, // 37 | |
3774 X, _, _, _, _, // 38 | |
3775 X, _, _, _, _, // 39 | |
3776 2, 3, 5, _, _, // 40 | |
3777 3, 0, 3, 5, _, // 41 | |
3778 3, 1, 3, 5, _, // 42 | |
3779 X, _, _, _, _, // 43 | |
3780 X, _, _, _, _, // 44 | |
3781 X, _, _, _, _, // 45 | |
3782 X, _, _, _, _, // 46 | |
3783 X, _, _, _, _, // 47 | |
3784 X, _, _, _, _, // 48 | |
3785 X, _, _, _, _, // 49 | |
3786 X, _, _, _, _, // 50 | |
3787 X, _, _, _, _, // 51 | |
3788 X, _, _, _, _, // 52 | |
3789 X, _, _, _, _, // 53 | |
3790 X, _, _, _, _, // 54 | |
3791 X, _, _, _, _, // 55 | |
3792 X, _, _, _, _, // 56 | |
3793 X, _, _, _, _, // 57 | |
3794 X, _, _, _, _, // 58 | |
3795 X, _, _, _, _, // 59 | |
3796 X, _, _, _, _, // 60 | |
3797 X, _, _, _, _, // 61 | |
3798 X, _, _, _, _, // 62 | |
3799 X, _, _, _, _, // 63 | |
3800 1, 6, _, _, _, // 64 | |
3801 2, 0, 6, _, _, // 65 | |
3802 2, 1, 6, _, _, // 66 | |
3803 X, _, _, _, _, // 67 | |
3804 2, 2, 6, _, _, // 68 | |
3805 3, 0, 2, 6, _, // 69 | |
3806 X, _, _, _, _, // 70 | |
3807 X, _, _, _, _, // 71 | |
3808 2, 3, 6, _, _, // 72 | |
3809 3, 0, 3, 6, _, // 73 | |
3810 3, 1, 3, 6, _, // 74 | |
3811 X, _, _, _, _, // 75 | |
3812 X, _, _, _, _, // 76 | |
3813 X, _, _, _, _, // 77 | |
3814 X, _, _, _, _, // 78 | |
3815 X, _, _, _, _, // 79 | |
3816 2, 4, 6, _, _, // 80 | |
3817 3, 0, 4, 6, _, // 81 | |
3818 3, 1, 4, 6, _, // 82 | |
3819 X, _, _, _, _, // 83 | |
3820 3, 2, 4, 6, _, // 84 | |
3821 4, 0, 2, 4, 6, // 85 | |
3822 X, _, _, _, _, // 86 | |
3823 X, _, _, _, _, // 87 | |
3824 X, _, _, _, _, // 88 | |
3825 X, _, _, _, _, // 89 | |
3826 X, _, _, _, _, // 90 | |
3827 X, _, _, _, _, // 91 | |
3828 X, _, _, _, _, // 92 | |
3829 X, _, _, _, _, // 93 | |
3830 X, _, _, _, _, // 94 | |
3831 X, _, _, _, _, // 95 | |
3832 X, _, _, _, _, // 96 | |
3833 X, _, _, _, _, // 97 | |
3834 X, _, _, _, _, // 98 | |
3835 X, _, _, _, _, // 99 | |
3836 X, _, _, _, _, // 100 | |
3837 X, _, _, _, _, // 101 | |
3838 X, _, _, _, _, // 102 | |
3839 X, _, _, _, _, // 103 | |
3840 X, _, _, _, _, // 104 | |
3841 X, _, _, _, _, // 105 | |
3842 X, _, _, _, _, // 106 | |
3843 X, _, _, _, _, // 107 | |
3844 X, _, _, _, _, // 108 | |
3845 X, _, _, _, _, // 109 | |
3846 X, _, _, _, _, // 110 | |
3847 X, _, _, _, _, // 111 | |
3848 X, _, _, _, _, // 112 | |
3849 X, _, _, _, _, // 113 | |
3850 X, _, _, _, _, // 114 | |
3851 X, _, _, _, _, // 115 | |
3852 X, _, _, _, _, // 116 | |
3853 X, _, _, _, _, // 117 | |
3854 X, _, _, _, _, // 118 | |
3855 X, _, _, _, _, // 119 | |
3856 X, _, _, _, _, // 120 | |
3857 X, _, _, _, _, // 121 | |
3858 X, _, _, _, _, // 122 | |
3859 X, _, _, _, _, // 123 | |
3860 X, _, _, _, _, // 124 | |
3861 X, _, _, _, _, // 125 | |
3862 X, _, _, _, _, // 126 | |
3863 X, _, _, _, _, // 127 | |
3864 1, 7, _, _, _, // 128 | |
3865 2, 0, 7, _, _, // 129 | |
3866 2, 1, 7, _, _, // 130 | |
3867 X, _, _, _, _, // 131 | |
3868 2, 2, 7, _, _, // 132 | |
3869 3, 0, 2, 7, _, // 133 | |
3870 X, _, _, _, _, // 134 | |
3871 X, _, _, _, _, // 135 | |
3872 2, 3, 7, _, _, // 136 | |
3873 3, 0, 3, 7, _, // 137 | |
3874 3, 1, 3, 7, _, // 138 | |
3875 X, _, _, _, _, // 139 | |
3876 X, _, _, _, _, // 140 | |
3877 X, _, _, _, _, // 141 | |
3878 X, _, _, _, _, // 142 | |
3879 X, _, _, _, _, // 143 | |
3880 2, 4, 7, _, _, // 144 | |
3881 3, 0, 4, 7, _, // 145 | |
3882 3, 1, 4, 7, _, // 146 | |
3883 X, _, _, _, _, // 147 | |
3884 3, 2, 4, 7, _, // 148 | |
3885 4, 0, 2, 4, 7, // 149 | |
3886 X, _, _, _, _, // 150 | |
3887 X, _, _, _, _, // 151 | |
3888 X, _, _, _, _, // 152 | |
3889 X, _, _, _, _, // 153 | |
3890 X, _, _, _, _, // 154 | |
3891 X, _, _, _, _, // 155 | |
3892 X, _, _, _, _, // 156 | |
3893 X, _, _, _, _, // 157 | |
3894 X, _, _, _, _, // 158 | |
3895 X, _, _, _, _, // 159 | |
3896 2, 5, 7, _, _, // 160 | |
3897 3, 0, 5, 7, _, // 161 | |
3898 3, 1, 5, 7, _, // 162 | |
3899 X, _, _, _, _, // 163 | |
3900 3, 2, 5, 7, _, // 164 | |
3901 4, 0, 2, 5, 7, // 165 | |
3902 X, _, _, _, _, // 166 | |
3903 X, _, _, _, _, // 167 | |
3904 3, 3, 5, 7, _, // 168 | |
3905 4, 0, 3, 5, 7, // 169 | |
3906 4, 1, 3, 5, 7 // 170 | |
3907 }; | |
3908 #undef _ | |
3909 #undef X | |
3910 | |
3911 | |
3912 // Takes a word of mark bits. Returns the number of objects that start in the | |
3913 // range. Puts the offsets of the words in the supplied array. | |
3914 static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts) { | |
3915 int objects = 0; | |
3916 int offset = 0; | |
3917 | |
3918 // No consecutive 1 bits. | |
3919 DCHECK((mark_bits & 0x180) != 0x180); | |
3920 DCHECK((mark_bits & 0x18000) != 0x18000); | |
3921 DCHECK((mark_bits & 0x1800000) != 0x1800000); | |
3922 | |
3923 while (mark_bits != 0) { | |
3924 int byte = (mark_bits & 0xff); | |
3925 mark_bits >>= 8; | |
3926 if (byte != 0) { | |
3927 DCHECK(byte < kStartTableLines); // No consecutive 1 bits. | |
3928 char* table = kStartTable + byte * kStartTableEntriesPerLine; | |
3929 int objects_in_these_8_words = table[0]; | |
3930 DCHECK(objects_in_these_8_words != kStartTableInvalidLine); | |
3931 DCHECK(objects_in_these_8_words < kStartTableEntriesPerLine); | |
3932 for (int i = 0; i < objects_in_these_8_words; i++) { | |
3933 starts[objects++] = offset + table[1 + i]; | |
3934 } | |
3935 } | |
3936 offset += 8; | |
3937 } | |
3938 return objects; | |
3939 } | |
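
// Worked example (illustrative): for mark_bits == 0x15 (binary 10101) the
// low byte is 21, so line 21 of kStartTable applies: "3, 0, 2, 4" -- three
// objects starting at word offsets 0, 2 and 4 from the cell base.
//   int starts[16];
//   int n = MarkWordToObjectStarts(0x15, starts);
//   // n == 3 and starts[0..2] == {0, 2, 4}.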
3940 | |
3941 | |
3942 static inline Address DigestFreeStart(Address approximate_free_start, | |
3943 uint32_t free_start_cell) { | |
3944 DCHECK(free_start_cell != 0); | |
3945 | |
3946 // No consecutive 1 bits. | |
3947 DCHECK((free_start_cell & (free_start_cell << 1)) == 0); | |
3948 | |
3949 int offsets[16]; | |
3950 uint32_t cell = free_start_cell; | |
3951 int offset_of_last_live; | |
3952 if ((cell & 0x80000000u) != 0) { | |
3953 // This case would overflow below. | |
3954 offset_of_last_live = 31; | |
3955 } else { | |
3956 // Remove all but one bit, the most significant. This is an optimization | |
3957 // that may or may not be worthwhile. | |
3958 cell |= cell >> 16; | |
3959 cell |= cell >> 8; | |
3960 cell |= cell >> 4; | |
3961 cell |= cell >> 2; | |
3962 cell |= cell >> 1; | |
3963 cell = (cell + 1) >> 1; | |
3964 int live_objects = MarkWordToObjectStarts(cell, offsets); | |
3965 DCHECK(live_objects == 1); | |
3966 offset_of_last_live = offsets[live_objects - 1]; | |
3967 } | |
3968 Address last_live_start = | |
3969 approximate_free_start + offset_of_last_live * kPointerSize; | |
3970 HeapObject* last_live = HeapObject::FromAddress(last_live_start); | |
3971 Address free_start = last_live_start + last_live->Size(); | |
3972 return free_start; | |
3973 } | |
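
// Worked example for the or-shift cascade above (illustrative): starting
// from cell = 0x00012000 (bits 13 and 16 set), the successive shifts smear
// the topmost 1 bit downwards, giving 0x0001ffff, and (cell + 1) >> 1 then
// leaves only 0x00010000 -- the most significant set bit, which marks the
// start of the last live object in the cell.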
3974 | |
3975 | |
3976 static inline Address StartOfLiveObject(Address block_address, uint32_t cell) { | |
3977 DCHECK(cell != 0); | |
3978 | |
3979 // No consecutive 1 bits. | |
3980 DCHECK((cell & (cell << 1)) == 0); | |
3981 | |
3982 int offsets[16]; | |
3983 if (cell == 0x80000000u) { // Avoid overflow below. | |
3984 return block_address + 31 * kPointerSize; | |
3985 } | |
3986 uint32_t first_set_bit = ((cell ^ (cell - 1)) + 1) >> 1; | |
3987 DCHECK((first_set_bit & cell) == first_set_bit); | |
3988 int live_objects = MarkWordToObjectStarts(first_set_bit, offsets); | |
3989 DCHECK(live_objects == 1); | |
3990 USE(live_objects); | |
3991 return block_address + offsets[0] * kPointerSize; | |
3992 } | |
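
// Worked example for the bit trick above (illustrative): for cell = 0x12000,
// cell ^ (cell - 1) == 0x3fff keeps the lowest set bit and everything below
// it; adding one and shifting right leaves first_set_bit == 0x2000, the
// lowest set bit alone (equivalent to the classic cell & ~(cell - 1)).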
3993 | |
3994 | |
3995 // Force instantiation of templatized SweepConservatively method for | |
3996 // SWEEP_ON_MAIN_THREAD mode. | |
3997 template int MarkCompactCollector:: | |
3998 SweepConservatively<MarkCompactCollector::SWEEP_ON_MAIN_THREAD>( | |
3999 PagedSpace*, FreeList*, Page*); | |
4000 | |
4001 | |
4002 // Force instantiation of templatized SweepConservatively method for | |
4003 // SWEEP_IN_PARALLEL mode. | |
4004 template int MarkCompactCollector:: | |
4005 SweepConservatively<MarkCompactCollector::SWEEP_IN_PARALLEL>( | |
4006 PagedSpace*, FreeList*, Page*); | |
4007 | |
4008 | |
4009 // Sweeps a space conservatively. After this has been done the larger free | |
4010 // spaces have been put on the free list and the smaller ones have been | |
4011 // ignored and left untouched. A free space is always either ignored or put | |
4012 // on the free list, never split up into two parts. This is important | |
4013 // because it means that any FreeSpace maps left actually describe a region of | |
4014 // memory that can be ignored when scanning. Dead objects other than free | |
4015 // spaces will not contain the free space map. | |
4016 template<MarkCompactCollector::SweepingParallelism mode> | |
4017 int MarkCompactCollector::SweepConservatively(PagedSpace* space, | |
4018 FreeList* free_list, | |
4019 Page* p) { | |
4020 DCHECK(!p->IsEvacuationCandidate() && !p->WasSwept()); | |
4021 DCHECK((mode == MarkCompactCollector::SWEEP_IN_PARALLEL && | |
4022 free_list != NULL) || | |
4023 (mode == MarkCompactCollector::SWEEP_ON_MAIN_THREAD && | |
4024 free_list == NULL)); | |
4025 | |
4026 intptr_t freed_bytes = 0; | |
4027 intptr_t max_freed_bytes = 0; | |
4028 size_t size = 0; | |
4029 | |
4030 // Skip over all the dead objects at the start of the page and mark them free. | |
4031 Address cell_base = 0; | |
4032 MarkBit::CellType* cell = NULL; | |
4033 MarkBitCellIterator it(p); | |
4034 for (; !it.Done(); it.Advance()) { | |
4035 cell_base = it.CurrentCellBase(); | |
4036 cell = it.CurrentCell(); | |
4037 if (*cell != 0) break; | |
4038 } | |
4039 | |
4040 if (it.Done()) { | |
4041 size = p->area_end() - p->area_start(); | |
4042 freed_bytes = Free<mode>(space, free_list, p->area_start(), | |
4043 static_cast<int>(size)); | |
4044 max_freed_bytes = Max(freed_bytes, max_freed_bytes); | |
4045 DCHECK_EQ(0, p->LiveBytes()); | |
4046 if (mode == MarkCompactCollector::SWEEP_IN_PARALLEL) { | |
4047 // When concurrent sweeping is active, the page will be marked after | |
4048 // sweeping by the main thread. | |
4049 p->set_parallel_sweeping(MemoryChunk::SWEEPING_FINALIZE); | |
4050 } else { | |
4051 p->MarkSweptConservatively(); | |
4052 } | |
4053 return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes)); | |
4054 } | |
4055 | |
4056 // Grow the size of the start-of-page free space a little to get up to the | |
4057 // first live object. | |
4058 Address free_end = StartOfLiveObject(cell_base, *cell); | |
4059 // Free the first free space. | |
4060 size = free_end - p->area_start(); | |
4061 freed_bytes = Free<mode>(space, free_list, p->area_start(), | |
4062 static_cast<int>(size)); | |
4063 max_freed_bytes = Max(freed_bytes, max_freed_bytes); | |
4064 | |
4065 // The start of the current free area is represented in undigested form by | |
4066 // the address of the last 32-word section that contained a live object and | |
4067 // the marking bitmap for that cell, which describes where the live object | |
4068 // started. Unless we find a large free space in the bitmap we will not | |
4069 // digest this pair into a real address. We start the iteration here at the | |
4070 // first word in the marking bit map that indicates a live object. | |
4071 Address free_start = cell_base; | |
4072 MarkBit::CellType free_start_cell = *cell; | |
4073 | |
4074 for (; !it.Done(); it.Advance()) { | |
4075 cell_base = it.CurrentCellBase(); | |
4076 cell = it.CurrentCell(); | |
4077 if (*cell != 0) { | |
4078 // We have a live object. Check approximately whether it is more than 32 | |
4079 // words since the last live object. | |
4080 if (cell_base - free_start > 32 * kPointerSize) { | |
4081 free_start = DigestFreeStart(free_start, free_start_cell); | |
4082 if (cell_base - free_start > 32 * kPointerSize) { | |
4083 // Now that we know the exact start of the free space, it still looks | |
4084 // like we have a free space large enough to be worth bothering with, | |
4085 // so now we need to find the start of the first live object at the | |
4086 // end of the free space. | |
4087 free_end = StartOfLiveObject(cell_base, *cell); | |
4088 freed_bytes = Free<mode>(space, free_list, free_start, | |
4089 static_cast<int>(free_end - free_start)); | |
4090 max_freed_bytes = Max(freed_bytes, max_freed_bytes); | |
4091 } | |
4092 } | |
4093 // Update our undigested record of where the current free area started. | |
4094 free_start = cell_base; | |
4095 free_start_cell = *cell; | |
4096 // Clear marking bits for current cell. | |
4097 *cell = 0; | |
4098 } | |
4099 } | |
4100 | |
4101 // Handle the free space at the end of the page. | |
4102 if (cell_base - free_start > 32 * kPointerSize) { | |
4103 free_start = DigestFreeStart(free_start, free_start_cell); | |
4104 freed_bytes = Free<mode>(space, free_list, free_start, | |
4105 static_cast<int>(p->area_end() - free_start)); | |
4106 max_freed_bytes = Max(freed_bytes, max_freed_bytes); | |
4107 } | |
4108 | |
4109 p->ResetLiveBytes(); | |
4110 if (mode == MarkCompactCollector::SWEEP_IN_PARALLEL) { | |
4111 // When concurrent sweeping is active, the page will be marked after | |
4112 // sweeping by the main thread. | |
4113 p->set_parallel_sweeping(MemoryChunk::SWEEPING_FINALIZE); | |
4114 } else { | |
4115 p->MarkSweptConservatively(); | |
4116 } | |
4117 return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes)); | |
4118 } | |
4119 | |
4120 | |
4121 int MarkCompactCollector::SweepInParallel(PagedSpace* space, | |
4122 int required_freed_bytes) { | |
4123 int max_freed = 0; | |
4124 int max_freed_overall = 0; | |
4125 PageIterator it(space); | |
4126 while (it.has_next()) { | |
4127 Page* p = it.next(); | |
4128 max_freed = SweepInParallel(p, space); | |
4129 DCHECK(max_freed >= 0); | |
4130 if (required_freed_bytes > 0 && max_freed >= required_freed_bytes) { | |
4131 return max_freed; | |
4132 } | |
4133 max_freed_overall = Max(max_freed, max_freed_overall); | |
4134 if (p == space->end_of_unswept_pages()) break; | |
4135 } | |
4136 return max_freed_overall; | |
4137 } | |
4138 | |
4139 | |
4140 int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) { | |
4141 int max_freed = 0; | |
4142 if (page->TryParallelSweeping()) { | |
4143 FreeList* free_list = space == heap()->old_pointer_space() | |
4144 ? free_list_old_pointer_space_.get() | |
4145 : free_list_old_data_space_.get(); | |
4146 FreeList private_free_list(space); | |
4147 if (space->swept_precisely()) { | |
4148 max_freed = SweepPrecisely<SWEEP_ONLY, SWEEP_IN_PARALLEL, | |
4149 IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>( | |
4150 space, &private_free_list, page, NULL); | |
4151 } else { | |
4152 max_freed = SweepConservatively<SWEEP_IN_PARALLEL>( | |
4153 space, &private_free_list, page); | |
4154 } | |
4155 free_list->Concatenate(&private_free_list); | |
4156 } | |
4157 return max_freed; | |
4158 } | |
4159 | |
4160 | |
4161 void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) { | |
4162 space->set_swept_precisely(sweeper == PRECISE || | |
4163 sweeper == CONCURRENT_PRECISE || | |
4164 sweeper == PARALLEL_PRECISE); | |
4165 space->ClearStats(); | |
4166 | |
4167 // We defensively initialize end_of_unswept_pages_ here with the first page | |
4168 // of the pages list. | |
4169 space->set_end_of_unswept_pages(space->FirstPage()); | |
4170 | |
4171 PageIterator it(space); | |
4172 | |
4173 int pages_swept = 0; | |
4174 bool unused_page_present = false; | |
4175 bool parallel_sweeping_active = false; | |
4176 | |
4177 while (it.has_next()) { | |
4178 Page* p = it.next(); | |
4179 DCHECK(p->parallel_sweeping() == MemoryChunk::SWEEPING_DONE); | |
4180 | |
4181 // Clear sweeping flags indicating that marking bits are still intact. | |
4182 p->ClearSweptPrecisely(); | |
4183 p->ClearSweptConservatively(); | |
4184 | |
4185 if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION) || | |
4186 p->IsEvacuationCandidate()) { | |
4187 // Will be processed in EvacuateNewSpaceAndCandidates. | |
4188 DCHECK(evacuation_candidates_.length() > 0); | |
4189 continue; | |
4190 } | |
4191 | |
4192 // One unused page is kept; further unused pages are released, not swept. | |
4193 if (p->LiveBytes() == 0) { | |
4194 if (unused_page_present) { | |
4195 if (FLAG_gc_verbose) { | |
4196 PrintF("Sweeping 0x%" V8PRIxPTR " released page.\n", | |
4197 reinterpret_cast<intptr_t>(p)); | |
4198 } | |
4199 // Adjust unswept free bytes because releasing a page expects said | |
4200 // counter to be accurate for unswept pages. | |
4201 space->IncreaseUnsweptFreeBytes(p); | |
4202 space->ReleasePage(p); | |
4203 continue; | |
4204 } | |
4205 unused_page_present = true; | |
4206 } | |
4207 | |
4208 switch (sweeper) { | |
4209 case CONCURRENT_CONSERVATIVE: | |
4210 case PARALLEL_CONSERVATIVE: { | |
4211 if (!parallel_sweeping_active) { | |
4212 if (FLAG_gc_verbose) { | |
4213 PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n", | |
4214 reinterpret_cast<intptr_t>(p)); | |
4215 } | |
4216 SweepConservatively<SWEEP_ON_MAIN_THREAD>(space, NULL, p); | |
4217 pages_swept++; | |
4218 parallel_sweeping_active = true; | |
4219 } else { | |
4220 if (FLAG_gc_verbose) { | |
4221 PrintF("Sweeping 0x%" V8PRIxPTR " conservatively in parallel.\n", | |
4222 reinterpret_cast<intptr_t>(p)); | |
4223 } | |
4224 p->set_parallel_sweeping(MemoryChunk::SWEEPING_PENDING); | |
4225 space->IncreaseUnsweptFreeBytes(p); | |
4226 } | |
4227 space->set_end_of_unswept_pages(p); | |
4228 break; | |
4229 } | |
4230 case CONCURRENT_PRECISE: | |
4231 case PARALLEL_PRECISE: | |
4232 if (!parallel_sweeping_active) { | |
4233 if (FLAG_gc_verbose) { | |
4234 PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n", | |
4235 reinterpret_cast<intptr_t>(p)); | |
4236 } | |
4237 SweepPrecisely<SWEEP_ONLY, | |
4238 SWEEP_ON_MAIN_THREAD, | |
4239 IGNORE_SKIP_LIST, | |
4240 IGNORE_FREE_SPACE>(space, NULL, p, NULL); | |
4241 pages_swept++; | |
4242 parallel_sweeping_active = true; | |
4243 } else { | |
4244 if (FLAG_gc_verbose) { | |
4245 PrintF("Sweeping 0x%" V8PRIxPTR " conservatively in parallel.\n", | |
4246 reinterpret_cast<intptr_t>(p)); | |
4247 } | |
4248 p->set_parallel_sweeping(MemoryChunk::SWEEPING_PENDING); | |
4249 space->IncreaseUnsweptFreeBytes(p); | |
4250 } | |
4251 space->set_end_of_unswept_pages(p); | |
4252 break; | |
4253 case PRECISE: { | |
4254 if (FLAG_gc_verbose) { | |
4255 PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n", | |
4256 reinterpret_cast<intptr_t>(p)); | |
4257 } | |
4258 if (space->identity() == CODE_SPACE && FLAG_zap_code_space) { | |
4259 SweepPrecisely<SWEEP_ONLY, | |
4260 SWEEP_ON_MAIN_THREAD, | |
4261 REBUILD_SKIP_LIST, | |
4262 ZAP_FREE_SPACE>(space, NULL, p, NULL); | |
4263 } else if (space->identity() == CODE_SPACE) { | |
4264 SweepPrecisely<SWEEP_ONLY, | |
4265 SWEEP_ON_MAIN_THREAD, | |
4266 REBUILD_SKIP_LIST, | |
4267 IGNORE_FREE_SPACE>(space, NULL, p, NULL); | |
4268 } else { | |
4269 SweepPrecisely<SWEEP_ONLY, | |
4270 SWEEP_ON_MAIN_THREAD, | |
4271 IGNORE_SKIP_LIST, | |
4272 IGNORE_FREE_SPACE>(space, NULL, p, NULL); | |
4273 } | |
4274 pages_swept++; | |
4275 break; | |
4276 } | |
4277 default: { | |
4278 UNREACHABLE(); | |
4279 } | |
4280 } | |
4281 } | |
4282 | |
4283 if (FLAG_gc_verbose) { | |
4284 PrintF("SweepSpace: %s (%d pages swept)\n", | |
4285 AllocationSpaceName(space->identity()), | |
4286 pages_swept); | |
4287 } | |
4288 | |
4289 // Give pages that are queued to be freed back to the OS. | |
4290 heap()->FreeQueuedChunks(); | |
4291 } | |
4292 | |
4293 | |
4294 static bool ShouldStartSweeperThreads(MarkCompactCollector::SweeperType type) { | |
4295 return type == MarkCompactCollector::PARALLEL_CONSERVATIVE || | |
4296 type == MarkCompactCollector::CONCURRENT_CONSERVATIVE || | |
4297 type == MarkCompactCollector::PARALLEL_PRECISE || | |
4298 type == MarkCompactCollector::CONCURRENT_PRECISE; | |
4299 } | |
4300 | |
4301 | |
4302 static bool ShouldWaitForSweeperThreads( | |
4303 MarkCompactCollector::SweeperType type) { | |
4304 return type == MarkCompactCollector::PARALLEL_CONSERVATIVE || | |
4305 type == MarkCompactCollector::PARALLEL_PRECISE; | |
4306 } | |
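
// Summary of the sweeper configurations used below (a sketch):
//   PARALLEL_*   -- sweeper threads are started and the main thread waits
//                   for them before continuing (EnsureSweepingCompleted).
//   CONCURRENT_* -- sweeper threads are started and run concurrently with
//                   the mutator; completion is ensured lazily later.
//   PRECISE      -- everything is swept on the main thread.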
4307 | |
4308 | |
4309 void MarkCompactCollector::SweepSpaces() { | |
4310 GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_SWEEP); | |
4311 double start_time = 0.0; | |
4312 if (FLAG_print_cumulative_gc_stat) { | |
4313 start_time = base::OS::TimeCurrentMillis(); | |
4314 } | |
4315 | |
4316 #ifdef DEBUG | |
4317 state_ = SWEEP_SPACES; | |
4318 #endif | |
4319 SweeperType how_to_sweep = CONCURRENT_CONSERVATIVE; | |
4320 if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_CONSERVATIVE; | |
4321 if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_CONSERVATIVE; | |
4322 if (FLAG_always_precise_sweeping && FLAG_parallel_sweeping) { | |
4323 how_to_sweep = PARALLEL_PRECISE; | |
4324 } | |
4325 if (FLAG_always_precise_sweeping && FLAG_concurrent_sweeping) { | |
4326 how_to_sweep = CONCURRENT_PRECISE; | |
4327 } | |
4328 if (sweep_precisely_) how_to_sweep = PRECISE; | |
4329 | |
4330 MoveEvacuationCandidatesToEndOfPagesList(); | |
4331 | |
4332 // Noncompacting collections simply sweep the spaces to clear the mark | |
4333 // bits and free the nonlive blocks (for old and map spaces). We sweep | |
4334 // the map space last because freeing non-live maps overwrites them and | |
4335 // the other spaces rely on possibly non-live maps to get the sizes for | |
4336 // non-live objects. | |
4337 { GCTracer::Scope sweep_scope(heap()->tracer(), | |
4338 GCTracer::Scope::MC_SWEEP_OLDSPACE); | |
4339 { SequentialSweepingScope scope(this); | |
4340 SweepSpace(heap()->old_pointer_space(), how_to_sweep); | |
4341 SweepSpace(heap()->old_data_space(), how_to_sweep); | |
4342 } | |
4343 | |
4344 if (ShouldStartSweeperThreads(how_to_sweep)) { | |
4345 StartSweeperThreads(); | |
4346 } | |
4347 | |
4348 if (ShouldWaitForSweeperThreads(how_to_sweep)) { | |
4349 EnsureSweepingCompleted(); | |
4350 } | |
4351 } | |
4352 RemoveDeadInvalidatedCode(); | |
4353 | |
4354 { GCTracer::Scope sweep_scope(heap()->tracer(), | |
4355 GCTracer::Scope::MC_SWEEP_CODE); | |
4356 SweepSpace(heap()->code_space(), PRECISE); | |
4357 } | |
4358 | |
4359 { GCTracer::Scope sweep_scope(heap()->tracer(), | |
4360 GCTracer::Scope::MC_SWEEP_CELL); | |
4361 SweepSpace(heap()->cell_space(), PRECISE); | |
4362 SweepSpace(heap()->property_cell_space(), PRECISE); | |
4363 } | |
4364 | |
4365 EvacuateNewSpaceAndCandidates(); | |
4366 | |
4367 // ClearNonLiveTransitions depends on precise sweeping of map space to | |
4368 // detect whether unmarked map became dead in this collection or in one | |
4369 // of the previous ones. | |
4370 { GCTracer::Scope sweep_scope(heap()->tracer(), | |
4371 GCTracer::Scope::MC_SWEEP_MAP); | |
4372 SweepSpace(heap()->map_space(), PRECISE); | |
4373 } | |
4374 | |
4375 // Deallocate unmarked objects and clear marked bits for marked objects. | |
4376 heap_->lo_space()->FreeUnmarkedObjects(); | |
4377 | |
4378 // Deallocate evacuated candidate pages. | |
4379 ReleaseEvacuationCandidates(); | |
4380 | |
4381 if (FLAG_print_cumulative_gc_stat) { | |
4382 heap_->tracer()->AddSweepingTime(base::OS::TimeCurrentMillis() - | |
4383 start_time); | |
4384 } | |
4385 } | |
4386 | |
4387 | |
4388 void MarkCompactCollector::ParallelSweepSpaceComplete(PagedSpace* space) { | |
4389 PageIterator it(space); | |
4390 while (it.has_next()) { | |
4391 Page* p = it.next(); | |
4392 if (p->parallel_sweeping() == MemoryChunk::SWEEPING_FINALIZE) { | |
4393 p->set_parallel_sweeping(MemoryChunk::SWEEPING_DONE); | |
4394 if (space->swept_precisely()) { | |
4395 p->MarkSweptPrecisely(); | |
4396 } else { | |
4397 p->MarkSweptConservatively(); | |
4398 } | |
4399 } | |
4400 DCHECK(p->parallel_sweeping() == MemoryChunk::SWEEPING_DONE); | |
4401 } | |
4402 } | |
4403 | |
4404 | |
4405 void MarkCompactCollector::ParallelSweepSpacesComplete() { | |
4406 ParallelSweepSpaceComplete(heap()->old_pointer_space()); | |
4407 ParallelSweepSpaceComplete(heap()->old_data_space()); | |
4408 } | |
4409 | |
4410 | |
4411 void MarkCompactCollector::EnableCodeFlushing(bool enable) { | |
4412 if (isolate()->debug()->is_loaded() || | |
4413 isolate()->debug()->has_break_points()) { | |
4414 enable = false; | |
4415 } | |
4416 | |
4417 if (enable) { | |
4418 if (code_flusher_ != NULL) return; | |
4419 code_flusher_ = new CodeFlusher(isolate()); | |
4420 } else { | |
4421 if (code_flusher_ == NULL) return; | |
4422 code_flusher_->EvictAllCandidates(); | |
4423 delete code_flusher_; | |
4424 code_flusher_ = NULL; | |
4425 } | |
4426 | |
4427 if (FLAG_trace_code_flushing) { | |
4428 PrintF("[code-flushing is now %s]\n", enable ? "on" : "off"); | |
4429 } | |
4430 } | |
4431 | |
4432 | |
4433 // TODO(1466): ReportDeleteIfNeeded is currently not called. | |
4434 // Our profiling tools do not expect intersections between | |
4435 // code objects. We should either re-enable it or change our tools. | |
4436 void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj, | |
4437 Isolate* isolate) { | |
4438 if (obj->IsCode()) { | |
4439 PROFILE(isolate, CodeDeleteEvent(obj->address())); | |
4440 } | |
4441 } | |
4442 | |
4443 | |
4444 Isolate* MarkCompactCollector::isolate() const { | |
4445 return heap_->isolate(); | |
4446 } | |
4447 | |
4448 | |
4449 void MarkCompactCollector::Initialize() { | |
4450 MarkCompactMarkingVisitor::Initialize(); | |
4451 IncrementalMarking::Initialize(); | |
4452 } | |
4453 | |
4454 | |
4455 bool SlotsBuffer::IsTypedSlot(ObjectSlot slot) { | |
4456 return reinterpret_cast<uintptr_t>(slot) < NUMBER_OF_SLOT_TYPES; | |
4457 } | |
4458 | |
4459 | |
4460 bool SlotsBuffer::AddTo(SlotsBufferAllocator* allocator, | |
4461 SlotsBuffer** buffer_address, | |
4462 SlotType type, | |
4463 Address addr, | |
4464 AdditionMode mode) { | |
4465 SlotsBuffer* buffer = *buffer_address; | |
4466 if (buffer == NULL || !buffer->HasSpaceForTypedSlot()) { | |
4467 if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) { | |
4468 allocator->DeallocateChain(buffer_address); | |
4469 return false; | |
4470 } | |
4471 buffer = allocator->AllocateBuffer(buffer); | |
4472 *buffer_address = buffer; | |
4473 } | |
4474 DCHECK(buffer->HasSpaceForTypedSlot()); | |
4475 buffer->Add(reinterpret_cast<ObjectSlot>(type)); | |
4476 buffer->Add(reinterpret_cast<ObjectSlot>(addr)); | |
4477 return true; | |
4478 } | |
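
// A typed slot thus occupies two consecutive buffer entries: the SlotType,
// encoded as a small ObjectSlot value that IsTypedSlot() can distinguish
// from real slot addresses, followed by the address itself. A sketch of the
// matching decode (the real consumer is SlotsBuffer::UpdateSlots below):
//   for (int i = 0; i < idx_; ++i) {
//     ObjectSlot slot = slots_[i];
//     if (IsTypedSlot(slot)) {
//       SlotType type = DecodeSlotType(slot);
//       Address addr = reinterpret_cast<Address>(slots_[++i]);
//       // ... dispatch on type, see UpdateSlot() above.
//     }
//   }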
4479 | |
4480 | |
static inline SlotsBuffer::SlotType SlotTypeForRMode(RelocInfo::Mode rmode) {
  if (RelocInfo::IsCodeTarget(rmode)) {
    return SlotsBuffer::CODE_TARGET_SLOT;
  } else if (RelocInfo::IsEmbeddedObject(rmode)) {
    return SlotsBuffer::EMBEDDED_OBJECT_SLOT;
  } else if (RelocInfo::IsDebugBreakSlot(rmode)) {
    return SlotsBuffer::DEBUG_TARGET_SLOT;
  } else if (RelocInfo::IsJSReturn(rmode)) {
    return SlotsBuffer::JS_RETURN_SLOT;
  }
  UNREACHABLE();
  return SlotsBuffer::NUMBER_OF_SLOT_TYPES;
}


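// Records a relocated reference that points into an evacuation candidate
// page. References living in a constant pool entry are recorded against the
// entry's address (untyped for embedded objects, CODE_ENTRY_SLOT for code
// targets); everything else is recorded as a typed slot at rinfo->pc().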
void MarkCompactCollector::RecordRelocSlot(RelocInfo* rinfo, Object* target) {
  Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
  RelocInfo::Mode rmode = rinfo->rmode();
  if (target_page->IsEvacuationCandidate() &&
      (rinfo->host() == NULL ||
       !ShouldSkipEvacuationSlotRecording(rinfo->host()))) {
    bool success;
    if (RelocInfo::IsEmbeddedObject(rmode) && rinfo->IsInConstantPool()) {
      // This doesn't need to be typed since it is just a normal heap pointer.
      Object** target_pointer =
          reinterpret_cast<Object**>(rinfo->constant_pool_entry_address());
      success = SlotsBuffer::AddTo(&slots_buffer_allocator_,
                                   target_page->slots_buffer_address(),
                                   target_pointer,
                                   SlotsBuffer::FAIL_ON_OVERFLOW);
    } else if (RelocInfo::IsCodeTarget(rmode) && rinfo->IsInConstantPool()) {
      success = SlotsBuffer::AddTo(&slots_buffer_allocator_,
                                   target_page->slots_buffer_address(),
                                   SlotsBuffer::CODE_ENTRY_SLOT,
                                   rinfo->constant_pool_entry_address(),
                                   SlotsBuffer::FAIL_ON_OVERFLOW);
    } else {
      success = SlotsBuffer::AddTo(&slots_buffer_allocator_,
                                   target_page->slots_buffer_address(),
                                   SlotTypeForRMode(rmode),
                                   rinfo->pc(),
                                   SlotsBuffer::FAIL_ON_OVERFLOW);
    }
    if (!success) {
      EvictEvacuationCandidate(target_page);
    }
  }
}


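// Records a code-entry slot pointing into an evacuation candidate so the
// entry can be updated after the target code object has moved.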
void MarkCompactCollector::RecordCodeEntrySlot(Address slot, Code* target) {
  Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
  if (target_page->IsEvacuationCandidate() &&
      !ShouldSkipEvacuationSlotRecording(reinterpret_cast<Object**>(slot))) {
    if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
                            target_page->slots_buffer_address(),
                            SlotsBuffer::CODE_ENTRY_SLOT,
                            slot,
                            SlotsBuffer::FAIL_ON_OVERFLOW)) {
      EvictEvacuationCandidate(target_page);
    }
  }
}


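// Called when a code target is patched while a mark-compact GC is in
// progress. Only targets embedded in live (black) host code are recorded;
// slots in unmarked hosts need no forwarding.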
void MarkCompactCollector::RecordCodeTargetPatch(Address pc, Code* target) {
  DCHECK(heap()->gc_state() == Heap::MARK_COMPACT);
  if (is_compacting()) {
    Code* host = isolate()->inner_pointer_to_code_cache()->
        GcSafeFindCodeForInnerPointer(pc);
    MarkBit mark_bit = Marking::MarkBitFrom(host);
    if (Marking::IsBlack(mark_bit)) {
      RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
      RecordRelocSlot(&rinfo, target);
    }
  }
}


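// Inverse of the in-band encoding used by SlotsBuffer::AddTo: reinterprets
// a buffer entry as the SlotType it stores.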
static inline SlotsBuffer::SlotType DecodeSlotType(
    SlotsBuffer::ObjectSlot slot) {
  return static_cast<SlotsBuffer::SlotType>(reinterpret_cast<intptr_t>(slot));
}


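// Walks the buffer and updates every recorded slot. Untyped entries are
// plain heap pointers; a typed entry consumes the following entry as its
// address, hence the extra ++slot_idx.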
void SlotsBuffer::UpdateSlots(Heap* heap) {
  PointersUpdatingVisitor v(heap);

  for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
    ObjectSlot slot = slots_[slot_idx];
    if (!IsTypedSlot(slot)) {
      PointersUpdatingVisitor::UpdateSlot(heap, slot);
    } else {
      ++slot_idx;
      DCHECK(slot_idx < idx_);
      UpdateSlot(heap->isolate(),
                 &v,
                 DecodeSlotType(slot),
                 reinterpret_cast<Address>(slots_[slot_idx]));
    }
  }
}


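// Same as UpdateSlots, but skips any slot that lies on an invalidated code
// object.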
void SlotsBuffer::UpdateSlotsWithFilter(Heap* heap) {
  PointersUpdatingVisitor v(heap);

  for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
    ObjectSlot slot = slots_[slot_idx];
    if (!IsTypedSlot(slot)) {
      if (!IsOnInvalidatedCodeObject(reinterpret_cast<Address>(slot))) {
        PointersUpdatingVisitor::UpdateSlot(heap, slot);
      }
    } else {
      ++slot_idx;
      DCHECK(slot_idx < idx_);
      Address pc = reinterpret_cast<Address>(slots_[slot_idx]);
      if (!IsOnInvalidatedCodeObject(pc)) {
        UpdateSlot(heap->isolate(),
                   &v,
                   DecodeSlotType(slot),
                   pc);
      }
    }
  }
}


SlotsBuffer* SlotsBufferAllocator::AllocateBuffer(SlotsBuffer* next_buffer) {
  return new SlotsBuffer(next_buffer);
}


void SlotsBufferAllocator::DeallocateBuffer(SlotsBuffer* buffer) {
  delete buffer;
}


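// Frees an entire buffer chain by following the next() links, then clears
// the caller's head pointer.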
void SlotsBufferAllocator::DeallocateChain(SlotsBuffer** buffer_address) {
  SlotsBuffer* buffer = *buffer_address;
  while (buffer != NULL) {
    SlotsBuffer* next_buffer = buffer->next();
    DeallocateBuffer(buffer);
    buffer = next_buffer;
  }
  *buffer_address = NULL;
}

} }  // namespace v8::internal