Chromium Code Reviews

Side by Side Diff: src/mark-compact.cc

Issue 7834018: Support compaction for code space pages. (Closed) Base URL: https://v8.googlecode.com/svn/branches/experimental/gc
Patch Set: port changes from ia32 to arm & x64 (created 9 years, 3 months ago)
1 // Copyright 2011 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 219 matching lines...)
230 230
231 231
232 bool MarkCompactCollector::StartCompaction() { 232 bool MarkCompactCollector::StartCompaction() {
233 // Don't start compaction if we are in the middle of incremental 233 // Don't start compaction if we are in the middle of incremental
234 // marking cycle. We did not collect any slots. 234 // marking cycle. We did not collect any slots.
235 if (!compacting_ && !heap_->incremental_marking()->IsMarking()) { 235 if (!compacting_ && !heap_->incremental_marking()->IsMarking()) {
236 ASSERT(evacuation_candidates_.length() == 0); 236 ASSERT(evacuation_candidates_.length() == 0);
237 237
238 CollectEvacuationCandidates(heap()->old_pointer_space()); 238 CollectEvacuationCandidates(heap()->old_pointer_space());
239 CollectEvacuationCandidates(heap()->old_data_space()); 239 CollectEvacuationCandidates(heap()->old_data_space());
240 CollectEvacuationCandidates(heap()->code_space());
240 241
241 heap()->old_pointer_space()->EvictEvacuationCandidatesFromFreeLists(); 242 heap()->old_pointer_space()->EvictEvacuationCandidatesFromFreeLists();
242 heap()->old_data_space()->EvictEvacuationCandidatesFromFreeLists(); 243 heap()->old_data_space()->EvictEvacuationCandidatesFromFreeLists();
244 heap()->code_space()->EvictEvacuationCandidatesFromFreeLists();
243 245
244 compacting_ = evacuation_candidates_.length() > 0; 246 compacting_ = evacuation_candidates_.length() > 0;
245 } 247 }
246 248
247 return compacting_; 249 return compacting_;
248 } 250 }
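
Illustration (not from the patch): code space now goes through the same two-step setup as the old spaces, i.e. collect fragmented pages as candidates and then evict those pages from the free lists so the allocator cannot place new objects on a page that is about to be evacuated. A minimal standalone sketch of the evict step, using invented free-list types rather than V8's:

#include <algorithm>
#include <cstdio>
#include <vector>

// Invented stand-ins: a free-list node that remembers which page it lives on.
struct PageStub { bool evacuation_candidate; };
struct FreeNode { PageStub* page; int size_in_bytes; };

// Mirrors the intent of EvictEvacuationCandidatesFromFreeLists(): drop every
// free-list entry that belongs to a candidate page, so the allocator cannot
// hand out memory on a page the collector is about to evacuate.
static void EvictEvacuationCandidates(std::vector<FreeNode>* free_list) {
  free_list->erase(
      std::remove_if(free_list->begin(), free_list->end(),
                     [](const FreeNode& n) { return n.page->evacuation_candidate; }),
      free_list->end());
}

int main() {
  PageStub kept = {false};
  PageStub evacuated = {true};
  std::vector<FreeNode> free_list = {{&kept, 64}, {&evacuated, 128}, {&kept, 32}};
  EvictEvacuationCandidates(&free_list);
  std::printf("free-list entries left: %zu\n", free_list.size());  // prints 2
  return 0;
}
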
249 251
250 252
251 void MarkCompactCollector::CollectGarbage() { 253 void MarkCompactCollector::CollectGarbage() {
252 // Make sure that Prepare() has been called. The individual steps below will 254 // Make sure that Prepare() has been called. The individual steps below will
(...skipping 119 matching lines...)
372 } 374 }
373 MarkBit old_mark_bit = MarkBitFrom(old_start); 375 MarkBit old_mark_bit = MarkBitFrom(old_start);
374 if (!old_mark_bit.Get()) { 376 if (!old_mark_bit.Get()) {
375 return false; 377 return false;
376 } 378 }
377 new_mark_bit.Set(); 379 new_mark_bit.Set();
378 return true; 380 return true;
379 } 381 }
380 382
381 383
384 static const char* AllocationSpaceName(AllocationSpace space) {
385 switch (space) {
386 case NEW_SPACE: return "NEW_SPACE";
387 case OLD_POINTER_SPACE: return "OLD_POINTER_SPACE";
388 case OLD_DATA_SPACE: return "OLD_DATA_SPACE";
389 case CODE_SPACE: return "CODE_SPACE";
390 case MAP_SPACE: return "MAP_SPACE";
391 case CELL_SPACE: return "CELL_SPACE";
392 case LO_SPACE: return "LO_SPACE";
393 default:
394 UNREACHABLE();
395 }
396
397 return NULL;
398 }
399
400
382 void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) { 401 void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
383 ASSERT(space->identity() == OLD_POINTER_SPACE || 402 ASSERT(space->identity() == OLD_POINTER_SPACE ||
384 space->identity() == OLD_DATA_SPACE); 403 space->identity() == OLD_DATA_SPACE ||
404 space->identity() == CODE_SPACE);
385 405
386 PageIterator it(space); 406 PageIterator it(space);
407 int count = 0;
387 if (it.has_next()) it.next(); // Never compact the first page. 408 if (it.has_next()) it.next(); // Never compact the first page.
388 while (it.has_next()) { 409 while (it.has_next()) {
389 Page* p = it.next(); 410 Page* p = it.next();
390 if (space->IsFragmented(p)) { 411 if (space->IsFragmented(p)) {
391 AddEvacuationCandidate(p); 412 AddEvacuationCandidate(p);
413 count++;
392 } else { 414 } else {
393 p->ClearEvacuationCandidate(); 415 p->ClearEvacuationCandidate();
394 } 416 }
395 } 417 }
418
419 if (count > 0 && FLAG_trace_fragmentation) {
420 PrintF("Collected %d evacuation candidates for space %s\n",
421 count,
422 AllocationSpaceName(space->identity()));
423 }
396 } 424 }
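
Illustration (not from the patch): candidate selection applies the existing fragmentation heuristic to one more space and now counts and traces what it picked. A standalone sketch of the selection pass, with invented Page types and a made-up 50% waste threshold standing in for the real IsFragmented() test:

#include <cstddef>
#include <cstdio>
#include <vector>

// Simplified stand-ins for Page/PagedSpace; the threshold below is
// illustrative only and is not V8's actual IsFragmented() heuristic.
struct Page {
  int live_bytes;
  int area_bytes;
  bool evacuation_candidate;
};

static bool IsFragmented(const Page& p) {
  return p.live_bytes * 2 < p.area_bytes;  // more than half of the page is waste
}

static int CollectCandidates(std::vector<Page>* pages, const char* space_name) {
  int count = 0;
  // Never compact the first page (mirrors the iterator skip in the patch).
  for (std::size_t i = 1; i < pages->size(); i++) {
    Page& p = (*pages)[i];
    if (IsFragmented(p)) {
      p.evacuation_candidate = true;
      count++;
    } else {
      p.evacuation_candidate = false;
    }
  }
  if (count > 0) {
    std::printf("Collected %d evacuation candidates for space %s\n",
                count, space_name);
  }
  return count;
}

int main() {
  std::vector<Page> code_space = {{900, 1000, false}, {100, 1000, false}, {800, 1000, false}};
  CollectCandidates(&code_space, "CODE_SPACE");  // picks the mostly-empty page
  return 0;
}
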
397 425
398 426
399 void MarkCompactCollector::Prepare(GCTracer* tracer) { 427 void MarkCompactCollector::Prepare(GCTracer* tracer) {
400 FLAG_flush_code = false; 428 FLAG_flush_code = false;
401 FLAG_always_compact = false; 429 FLAG_always_compact = false;
402 430
403 // Disable collection of maps if incremental marking is enabled. 431 // Disable collection of maps if incremental marking is enabled.
404 // Map collection algorithm relies on a special map transition tree traversal 432 // Map collection algorithm relies on a special map transition tree traversal
405 // order which is not implemented for incremental marking. 433 // order which is not implemented for incremental marking.
(...skipping 133 matching lines...)
539 567
540 Code* code = shared->unchecked_code(); 568 Code* code = shared->unchecked_code();
541 MarkBit code_mark = Marking::MarkBitFrom(code); 569 MarkBit code_mark = Marking::MarkBitFrom(code);
542 if (!code_mark.Get()) { 570 if (!code_mark.Get()) {
543 shared->set_code(lazy_compile); 571 shared->set_code(lazy_compile);
544 candidate->set_code(lazy_compile); 572 candidate->set_code(lazy_compile);
545 } else { 573 } else {
546 candidate->set_code(shared->unchecked_code()); 574 candidate->set_code(shared->unchecked_code());
547 } 575 }
548 576
577 Address slot = candidate->address() + JSFunction::kCodeEntryOffset;
Erik Corry 2011/09/06 14:10:31 This is kind of subtle. Comment?
Vyacheslav Egorov (Chromium) 2011/09/06 15:01:42 Done.
578 isolate_->heap()->mark_compact_collector()->
579 RecordCodeEntrySlot(slot, Code::cast(Memory::Object_at(slot)));
580
549 candidate = next_candidate; 581 candidate = next_candidate;
550 } 582 }
551 583
552 jsfunction_candidates_head_ = NULL; 584 jsfunction_candidates_head_ = NULL;
553 } 585 }
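
Illustration (not from the patch), on the point raised in the review comment above: the flusher writes a raw code pointer into the function's code-entry field, and once code space can be compacted that field may point at a page chosen for evacuation, so it has to be registered via RecordCodeEntrySlot or the later pointer-update pass would never rewrite it. A toy sketch of the record-then-update idea, with invented minimal types:

#include <cstdint>
#include <unordered_map>

// Invented stand-ins: a movable "code object" and a function whose
// code_entry field stores a raw address into that object.
struct CodeObject { uintptr_t address; };
struct JSFunctionLike { uintptr_t code_entry; };

// Recorded code-entry slots: raw-address fields that must be rewritten if the
// code object they point at moves during compaction.
static std::unordered_map<uintptr_t*, CodeObject*> g_code_entry_slots;

static void RecordCodeEntrySlot(uintptr_t* slot, CodeObject* target) {
  g_code_entry_slots[slot] = target;
}

// What the sketched flusher does after rewriting the entry: without the extra
// RecordCodeEntrySlot call the pointer-update pass would never visit this
// field, because it is a raw address rather than a tagged object pointer.
static void SetCodeAndRecord(JSFunctionLike* fn, CodeObject* code) {
  fn->code_entry = code->address;
  RecordCodeEntrySlot(&fn->code_entry, code);
}

// After evacuation: patch every recorded entry to the moved code's address.
static void UpdateCodeEntrySlots() {
  for (auto& entry : g_code_entry_slots) *entry.first = entry.second->address;
}

int main() {
  CodeObject lazy_compile = {0x1000};
  JSFunctionLike fn = {0};
  SetCodeAndRecord(&fn, &lazy_compile);
  lazy_compile.address = 0x2000;  // pretend the collector moved the code object
  UpdateCodeEntrySlots();
  return fn.code_entry == 0x2000 ? 0 : 1;
}
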
554 586
555 587
556 void ProcessSharedFunctionInfoCandidates() { 588 void ProcessSharedFunctionInfoCandidates() {
557 Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kLazyCompile); 589 Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kLazyCompile);
558 590
(...skipping 178 matching lines...)
737 if (end - start >= kMinRangeForMarkingRecursion) { 769 if (end - start >= kMinRangeForMarkingRecursion) {
738 if (VisitUnmarkedObjects(heap, start, end)) return; 770 if (VisitUnmarkedObjects(heap, start, end)) return;
739 // We are close to a stack overflow, so just mark the objects. 771 // We are close to a stack overflow, so just mark the objects.
740 } 772 }
741 MarkCompactCollector* collector = heap->mark_compact_collector(); 773 MarkCompactCollector* collector = heap->mark_compact_collector();
742 for (Object** p = start; p < end; p++) { 774 for (Object** p = start; p < end; p++) {
743 MarkObjectByPointer(collector, start, p); 775 MarkObjectByPointer(collector, start, p);
744 } 776 }
745 } 777 }
746 778
747 static inline void VisitCodeTarget(Heap* heap, RelocInfo* rinfo) {
748 ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
749 Code* code = Code::GetCodeFromTargetAddress(rinfo->target_address());
750 if (FLAG_cleanup_code_caches_at_gc && code->is_inline_cache_stub()) {
751 IC::Clear(rinfo->pc());
752 // Please note targets for cleared inline cached do not have to be
753 // marked since they are contained in HEAP->non_monomorphic_cache().
754 } else {
755 MarkBit code_mark = Marking::MarkBitFrom(code);
756 heap->mark_compact_collector()->MarkObject(code, code_mark);
757 }
758 }
759
760 static void VisitGlobalPropertyCell(Heap* heap, RelocInfo* rinfo) { 779 static void VisitGlobalPropertyCell(Heap* heap, RelocInfo* rinfo) {
761 ASSERT(rinfo->rmode() == RelocInfo::GLOBAL_PROPERTY_CELL); 780 ASSERT(rinfo->rmode() == RelocInfo::GLOBAL_PROPERTY_CELL);
762 Object* cell = rinfo->target_cell(); 781 JSGlobalPropertyCell* cell =
763 Object* old_cell = cell; 782 JSGlobalPropertyCell::cast(rinfo->target_cell());
764 VisitPointer(heap, &cell); 783 MarkBit mark = Marking::MarkBitFrom(cell);
765 if (cell != old_cell) { 784 heap->mark_compact_collector()->MarkObject(cell, mark);
766 rinfo->set_target_cell(reinterpret_cast<JSGlobalPropertyCell*>(cell), 785 }
767 NULL); 786
787 static inline void VisitCodeTarget(Heap* heap, RelocInfo* rinfo) {
788 ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
789 Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
790 if (FLAG_cleanup_code_caches_at_gc && target->is_inline_cache_stub()) {
791 IC::Clear(rinfo->pc());
792 // Please note targets for cleared inline cached do not have to be
793 // marked since they are contained in HEAP->non_monomorphic_cache().
794 target = Code::GetCodeFromTargetAddress(rinfo->target_address());
795 } else {
796 MarkBit code_mark = Marking::MarkBitFrom(target);
797 heap->mark_compact_collector()->MarkObject(target, code_mark);
768 } 798 }
799 heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
769 } 800 }
770 801
771 static inline void VisitDebugTarget(Heap* heap, RelocInfo* rinfo) { 802 static inline void VisitDebugTarget(Heap* heap, RelocInfo* rinfo) {
772 ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) && 803 ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
773 rinfo->IsPatchedReturnSequence()) || 804 rinfo->IsPatchedReturnSequence()) ||
774 (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) && 805 (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
775 rinfo->IsPatchedDebugBreakSlotSequence())); 806 rinfo->IsPatchedDebugBreakSlotSequence()));
776 HeapObject* code = Code::GetCodeFromTargetAddress(rinfo->call_address()); 807 Code* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
777 MarkBit code_mark = Marking::MarkBitFrom(code); 808 MarkBit code_mark = Marking::MarkBitFrom(target);
778 heap->mark_compact_collector()->MarkObject(code, code_mark); 809 heap->mark_compact_collector()->MarkObject(target, code_mark);
810 heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
779 } 811 }
780 812
781 // Mark object pointed to by p. 813 // Mark object pointed to by p.
782 INLINE(static void MarkObjectByPointer(MarkCompactCollector* collector, 814 INLINE(static void MarkObjectByPointer(MarkCompactCollector* collector,
783 Object** anchor_slot, 815 Object** anchor_slot,
784 Object** p)) { 816 Object** p)) {
785 if (!(*p)->IsHeapObject()) return; 817 if (!(*p)->IsHeapObject()) return;
786 HeapObject* object = ShortCircuitConsString(p); 818 HeapObject* object = ShortCircuitConsString(p);
787 collector->RecordSlot(anchor_slot, p, object); 819 collector->RecordSlot(anchor_slot, p, object);
788 MarkBit mark = Marking::MarkBitFrom(object); 820 MarkBit mark = Marking::MarkBitFrom(object);
(...skipping 299 matching lines...)
1088 if (known_flush_code_candidate) { 1120 if (known_flush_code_candidate) {
1089 heap->mark_compact_collector()->code_flusher()->AddCandidate(shared); 1121 heap->mark_compact_collector()->code_flusher()->AddCandidate(shared);
1090 } 1122 }
1091 } 1123 }
1092 1124
1093 VisitSharedFunctionInfoFields(heap, object, known_flush_code_candidate); 1125 VisitSharedFunctionInfoFields(heap, object, known_flush_code_candidate);
1094 } 1126 }
1095 1127
1096 1128
1097 static void VisitCodeEntry(Heap* heap, Address entry_address) { 1129 static void VisitCodeEntry(Heap* heap, Address entry_address) {
1098 Object* code = Code::GetObjectFromEntryAddress(entry_address); 1130 Code* code = Code::cast(Code::GetObjectFromEntryAddress(entry_address));
1099 Object* old_code = code; 1131 MarkBit mark = Marking::MarkBitFrom(code);
1100 VisitPointer(heap, &code); 1132 heap->mark_compact_collector()->MarkObject(code, mark);
1101 if (code != old_code) { 1133 heap->mark_compact_collector()->
1102 Memory::Address_at(entry_address) = 1134 RecordCodeEntrySlot(entry_address, code);
1103 reinterpret_cast<Code*>(code)->entry();
1104 }
1105 } 1135 }
1106 1136
1107 static void VisitGlobalContext(Map* map, HeapObject* object) { 1137 static void VisitGlobalContext(Map* map, HeapObject* object) {
1108 FixedBodyVisitor<StaticMarkingVisitor, 1138 FixedBodyVisitor<StaticMarkingVisitor,
1109 Context::MarkCompactBodyDescriptor, 1139 Context::MarkCompactBodyDescriptor,
1110 void>::Visit(map, object); 1140 void>::Visit(map, object);
1111 1141
1112 MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector(); 1142 MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector();
1113 for (int idx = Context::FIRST_WEAK_SLOT; 1143 for (int idx = Context::FIRST_WEAK_SLOT;
1114 idx < Context::GLOBAL_CONTEXT_SLOTS; 1144 idx < Context::GLOBAL_CONTEXT_SLOTS;
(...skipping 1146 matching lines...)
2261 // The second pass updates pointers to new space in all spaces. It is possible 2291 // The second pass updates pointers to new space in all spaces. It is possible
2262 // to encounter pointers to dead new space objects during traversal of pointers 2292 // to encounter pointers to dead new space objects during traversal of pointers
2263 // to new space. We should clear them to avoid encountering them during next 2293 // to new space. We should clear them to avoid encountering them during next
2264 // pointer iteration. This is an issue if the store buffer overflows and we 2294 // pointer iteration. This is an issue if the store buffer overflows and we
2265 // have to scan the entire old space, including dead objects, looking for 2295 // have to scan the entire old space, including dead objects, looking for
2266 // pointers to new space. 2296 // pointers to new space.
2267 void MarkCompactCollector::MigrateObject(Address dst, 2297 void MarkCompactCollector::MigrateObject(Address dst,
2268 Address src, 2298 Address src,
2269 int size, 2299 int size,
2270 AllocationSpace dest) { 2300 AllocationSpace dest) {
2271 ASSERT(dest == OLD_POINTER_SPACE ||
2272 dest == OLD_DATA_SPACE ||
2273 dest == LO_SPACE ||
2274 dest == NEW_SPACE);
2275
2276 if (dest == OLD_POINTER_SPACE || dest == LO_SPACE) { 2301 if (dest == OLD_POINTER_SPACE || dest == LO_SPACE) {
2277 Address src_slot = src; 2302 Address src_slot = src;
2278 Address dst_slot = dst; 2303 Address dst_slot = dst;
2279 ASSERT(IsAligned(size, kPointerSize)); 2304 ASSERT(IsAligned(size, kPointerSize));
2280 2305
2281 for (int remaining = size / kPointerSize; remaining > 0; remaining--) { 2306 for (int remaining = size / kPointerSize; remaining > 0; remaining--) {
2282 Object* value = Memory::Object_at(src_slot); 2307 Object* value = Memory::Object_at(src_slot);
2283 2308
2284 Memory::Object_at(dst_slot) = value; 2309 Memory::Object_at(dst_slot) = value;
2285 2310
2286 if (heap_->InNewSpace(value)) { 2311 if (heap_->InNewSpace(value)) {
2287 heap_->store_buffer()->Mark(dst_slot); 2312 heap_->store_buffer()->Mark(dst_slot);
2288 } else if (value->IsHeapObject() && 2313 } else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) {
2289 MarkCompactCollector::IsOnEvacuationCandidate(value)) {
2290 SlotsBuffer::AddTo(&slots_buffer_allocator_, 2314 SlotsBuffer::AddTo(&slots_buffer_allocator_,
2291 &migration_slots_buffer_, 2315 &migration_slots_buffer_,
2292 reinterpret_cast<Object**>(dst_slot), 2316 reinterpret_cast<Object**>(dst_slot),
2293 SlotsBuffer::IGNORE_OVERFLOW); 2317 SlotsBuffer::IGNORE_OVERFLOW);
2294 } 2318 }
2295 2319
2296 src_slot += kPointerSize; 2320 src_slot += kPointerSize;
2297 dst_slot += kPointerSize; 2321 dst_slot += kPointerSize;
2298 } 2322 }
2323
2324 if (compacting_ && HeapObject::FromAddress(dst)->IsJSFunction()) {
2325 Address code_entry_slot = dst + JSFunction::kCodeEntryOffset;
2326 Address code_entry = Memory::Address_at(code_entry_slot);
2327
2328 if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
2329 SlotsBuffer::AddTo(&slots_buffer_allocator_,
2330 &migration_slots_buffer_,
2331 SlotsBuffer::CODE_ENTRY_SLOT,
2332 code_entry_slot,
2333 SlotsBuffer::IGNORE_OVERFLOW);
2334 }
2335 }
2336 } else if (dest == CODE_SPACE) {
2337 PROFILE(heap()->isolate(), CodeMoveEvent(src, dst));
2338 heap()->MoveBlock(dst, src, size);
2339 SlotsBuffer::AddTo(&slots_buffer_allocator_,
2340 &migration_slots_buffer_,
2341 SlotsBuffer::RELOCATED_CODE_OBJECT,
2342 dst,
2343 SlotsBuffer::IGNORE_OVERFLOW);
2344 Code::cast(HeapObject::FromAddress(dst))->Relocate(dst - src);
2299 } else { 2345 } else {
2300 heap_->CopyBlock(dst, src, size); 2346 ASSERT(dest == OLD_DATA_SPACE || dest == NEW_SPACE);
2347 heap()->MoveBlock(dst, src, size);
2301 } 2348 }
2302 Memory::Address_at(src) = dst; 2349 Memory::Address_at(src) = dst;
2303 } 2350 }
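
Illustration (not from the patch): the new CODE_SPACE branch does two extra things after moving the bytes. It records the destination as a RELOCATED_CODE_OBJECT slot, so the slot-update pass will re-iterate the whole code body (see the UpdateSlot switch later in this file), and it calls Relocate(dst - src) so the code object can shift its own internal absolute addresses right away. A toy model of that delta fix-up, with an invented one-field "code object":

#include <cassert>
#include <cstdint>
#include <cstring>

// Invented toy: a "code object" whose body holds one absolute address pointing
// back into itself (standing in for embedded internal references).
struct ToyCode {
  uintptr_t self_slot;
  // Plays the role of Code::Relocate(delta): after the body has been moved by
  // `delta` bytes, internal absolute addresses must be shifted by the same amount.
  void Relocate(intptr_t delta) { self_slot += delta; }
};

int main() {
  ToyCode area[2] = {};
  ToyCode* src = &area[0];
  ToyCode* dst = &area[1];
  src->self_slot = reinterpret_cast<uintptr_t>(src);

  // "Evacuate" the object, like heap()->MoveBlock(dst, src, size) ...
  std::memcpy(dst, src, sizeof(ToyCode));
  // ... then let it fix itself up for its new location.
  dst->Relocate(reinterpret_cast<intptr_t>(dst) - reinterpret_cast<intptr_t>(src));

  assert(dst->self_slot == reinterpret_cast<uintptr_t>(dst));
  return 0;
}
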
2304 2351
2305 2352
2306 // Visitor for updating pointers from live objects in old spaces to new space. 2353 // Visitor for updating pointers from live objects in old spaces to new space.
2307 // It does not expect to encounter pointers to dead objects. 2354 // It does not expect to encounter pointers to dead objects.
2308 class PointersUpdatingVisitor: public ObjectVisitor { 2355 class PointersUpdatingVisitor: public ObjectVisitor {
2309 public: 2356 public:
2310 explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) { } 2357 explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) { }
2311 2358
2312 void VisitPointer(Object** p) { 2359 void VisitPointer(Object** p) {
2313 UpdatePointer(p); 2360 UpdatePointer(p);
2314 } 2361 }
2315 2362
2316 void VisitPointers(Object** start, Object** end) { 2363 void VisitPointers(Object** start, Object** end) {
2317 for (Object** p = start; p < end; p++) UpdatePointer(p); 2364 for (Object** p = start; p < end; p++) UpdatePointer(p);
2318 } 2365 }
2319 2366
2320 void VisitCodeTarget(RelocInfo* rinfo) { 2367 void VisitCodeTarget(RelocInfo* rinfo) {
2321 ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode())); 2368 ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
2322 Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address()); 2369 Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
2323 VisitPointer(&target); 2370 VisitPointer(&target);
2324 rinfo->set_target_address(Code::cast(target)->instruction_start(), NULL); 2371 rinfo->set_target_address(Code::cast(target)->instruction_start());
2325 } 2372 }
2326 2373
2327 void VisitDebugTarget(RelocInfo* rinfo) { 2374 void VisitDebugTarget(RelocInfo* rinfo) {
2328 ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) && 2375 ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
2329 rinfo->IsPatchedReturnSequence()) || 2376 rinfo->IsPatchedReturnSequence()) ||
2330 (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) && 2377 (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
2331 rinfo->IsPatchedDebugBreakSlotSequence())); 2378 rinfo->IsPatchedDebugBreakSlotSequence()));
2332 Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address()); 2379 Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
2333 VisitPointer(&target); 2380 VisitPointer(&target);
2334 rinfo->set_call_address(Code::cast(target)->instruction_start()); 2381 rinfo->set_call_address(Code::cast(target)->instruction_start());
(...skipping 242 matching lines...)
2577 2624
2578 MapWord map_word = heap_obj->map_word(); 2625 MapWord map_word = heap_obj->map_word();
2579 if (map_word.IsForwardingAddress()) { 2626 if (map_word.IsForwardingAddress()) {
2580 ASSERT(MarkCompactCollector::IsOnEvacuationCandidate(*slot)); 2627 ASSERT(MarkCompactCollector::IsOnEvacuationCandidate(*slot));
2581 *slot = map_word.ToForwardingAddress(); 2628 *slot = map_word.ToForwardingAddress();
2582 ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(*slot)); 2629 ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(*slot));
2583 } 2630 }
2584 } 2631 }
2585 2632
2586 2633
2634 static inline void UpdateSlot(ObjectVisitor* v,
2635 SlotsBuffer::SlotType slot_type,
2636 Address addr) {
2637 switch (slot_type) {
2638 case SlotsBuffer::CODE_TARGET_SLOT: {
2639 RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, NULL, NULL);
2640 rinfo.Visit(v);
2641 break;
2642 }
2643 case SlotsBuffer::CODE_ENTRY_SLOT: {
2644 v->VisitCodeEntry(addr);
2645 break;
2646 }
2647 case SlotsBuffer::RELOCATED_CODE_OBJECT: {
2648 HeapObject* obj = HeapObject::FromAddress(addr);
2649 Code::cast(obj)->CodeIterateBody(v);
2650 break;
2651 }
2652 case SlotsBuffer::DEBUG_TARGET_SLOT: {
2653 RelocInfo rinfo(addr, RelocInfo::DEBUG_BREAK_SLOT, NULL, NULL);
2654 if (rinfo.IsPatchedDebugBreakSlotSequence()) rinfo.Visit(v);
2655 break;
2656 }
2657 case SlotsBuffer::JS_RETURN_SLOT: {
2658 RelocInfo rinfo(addr, RelocInfo::JS_RETURN, NULL, NULL);
2659 if (rinfo.IsPatchedReturnSequence()) rinfo.Visit(v);
2660 break;
2661 }
2662 default:
2663 UNREACHABLE();
2664 break;
2665 }
2666 }
2667
2668
2587 static inline void UpdateSlotsInRange(Object** start, Object** end) { 2669 static inline void UpdateSlotsInRange(Object** start, Object** end) {
2588 for (Object** slot = start; 2670 for (Object** slot = start;
2589 slot < end; 2671 slot < end;
2590 slot++) { 2672 slot++) {
2591 Object* obj = *slot; 2673 Object* obj = *slot;
2592 if (obj->IsHeapObject() && 2674 if (obj->IsHeapObject() &&
2593 MarkCompactCollector::IsOnEvacuationCandidate(obj)) { 2675 MarkCompactCollector::IsOnEvacuationCandidate(obj)) {
2594 MapWord map_word = HeapObject::cast(obj)->map_word(); 2676 MapWord map_word = HeapObject::cast(obj)->map_word();
2595 if (map_word.IsForwardingAddress()) { 2677 if (map_word.IsForwardingAddress()) {
2596 *slot = map_word.ToForwardingAddress(); 2678 *slot = map_word.ToForwardingAddress();
2597 ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(*slot)); 2679 ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(*slot));
2598 } 2680 }
2599 } 2681 }
2600 } 2682 }
2601 } 2683 }
2602 2684
2603 2685
2604 enum SweepingMode { 2686 enum SweepingMode {
2605 SWEEP_ONLY, 2687 SWEEP_ONLY,
2606 SWEEP_AND_UPDATE_SLOTS 2688 SWEEP_AND_VISIT_LIVE_OBJECTS
2607 }; 2689 };
2608 2690
2609 2691
2610 // Sweep a space precisely. After this has been done the space can 2692 // Sweep a space precisely. After this has been done the space can
2611 // be iterated precisely, hitting only the live objects. Code space 2693 // be iterated precisely, hitting only the live objects. Code space
2612 // is always swept precisely because we want to be able to iterate 2694 // is always swept precisely because we want to be able to iterate
2613 // over it. Map space is swept precisely, because it is not compacted. 2695 // over it. Map space is swept precisely, because it is not compacted.
2614 // Slots in live objects pointing into evacuation candidates are updated 2696 // Slots in live objects pointing into evacuation candidates are updated
2615 // if requested. 2697 // if requested.
2616 static void SweepPrecisely(PagedSpace* space, Page* p, SweepingMode mode) { 2698 static void SweepPrecisely(PagedSpace* space,
2699 Page* p,
2700 SweepingMode mode,
2701 ObjectVisitor* v) {
2617 ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept()); 2702 ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept());
2618 MarkBit::CellType* cells = p->markbits()->cells(); 2703 MarkBit::CellType* cells = p->markbits()->cells();
2619 p->MarkSweptPrecisely(); 2704 p->MarkSweptPrecisely();
2620 2705
2621 int last_cell_index = 2706 int last_cell_index =
2622 Bitmap::IndexToCell( 2707 Bitmap::IndexToCell(
2623 Bitmap::CellAlignIndex( 2708 Bitmap::CellAlignIndex(
2624 p->AddressToMarkbitIndex(p->ObjectAreaEnd()))); 2709 p->AddressToMarkbitIndex(p->ObjectAreaEnd())));
2625 2710
2626 int cell_index = Page::kFirstUsedCell; 2711 int cell_index = Page::kFirstUsedCell;
(...skipping 11 matching lines...)
2638 p->AddressToMarkbitIndex(object_address)))); 2723 p->AddressToMarkbitIndex(object_address))));
2639 int live_objects = MarkWordToObjectStarts(cells[cell_index], offsets); 2724 int live_objects = MarkWordToObjectStarts(cells[cell_index], offsets);
2640 int live_index = 0; 2725 int live_index = 0;
2641 for ( ; live_objects != 0; live_objects--) { 2726 for ( ; live_objects != 0; live_objects--) {
2642 Address free_end = object_address + offsets[live_index++] * kPointerSize; 2727 Address free_end = object_address + offsets[live_index++] * kPointerSize;
2643 if (free_end != free_start) { 2728 if (free_end != free_start) {
2644 space->Free(free_start, static_cast<int>(free_end - free_start)); 2729 space->Free(free_start, static_cast<int>(free_end - free_start));
2645 } 2730 }
2646 HeapObject* live_object = HeapObject::FromAddress(free_end); 2731 HeapObject* live_object = HeapObject::FromAddress(free_end);
2647 ASSERT(Marking::IsBlack(Marking::MarkBitFrom(live_object))); 2732 ASSERT(Marking::IsBlack(Marking::MarkBitFrom(live_object)));
2648 int size = live_object->Size(); 2733 Map* map = live_object->map();
2649 if (mode == SWEEP_AND_UPDATE_SLOTS) { 2734 int size = live_object->SizeFromMap(map);
2650 UpdateSlotsInRange(HeapObject::RawField(live_object, kPointerSize), 2735 if (mode == SWEEP_AND_VISIT_LIVE_OBJECTS) {
2651 HeapObject::RawField(live_object, size)); 2736 live_object->IterateBody(map->instance_type(), size, v);
2652 } 2737 }
2653 free_start = free_end + size; 2738 free_start = free_end + size;
2654 } 2739 }
2655 // Clear marking bits for current cell. 2740 // Clear marking bits for current cell.
2656 cells[cell_index] = 0; 2741 cells[cell_index] = 0;
2657 } 2742 }
2658 if (free_start != p->ObjectAreaEnd()) { 2743 if (free_start != p->ObjectAreaEnd()) {
2659 space->Free(free_start, static_cast<int>(p->ObjectAreaEnd() - free_start)); 2744 space->Free(free_start, static_cast<int>(p->ObjectAreaEnd() - free_start));
2660 } 2745 }
2661 } 2746 }
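
Illustration (not from the patch): the mode rename reflects what the sweeper now does on rescan-on-evacuation pages. Instead of scanning each live object's raw words for pointers, it hands the object to a body visitor chosen from its map, which is presumably what makes it safe for Code objects whose bodies are mostly instruction bytes. A small sketch of the two sweeping modes, with invented object and visitor types:

#include <cstddef>
#include <cstdio>
#include <vector>

enum SweepingMode { SWEEP_ONLY, SWEEP_AND_VISIT_LIVE_OBJECTS };

// Invented stand-in for an object that knows its own layout: only its tagged
// fields are handed to the visitor; the raw payload (e.g. instruction bytes)
// is never interpreted as pointers.
struct LiveObject {
  std::vector<void*> tagged_fields;
  std::vector<char> raw_payload;
  template <typename Visitor>
  void IterateBody(Visitor* v) {
    for (void*& slot : tagged_fields) v->VisitPointer(&slot);
  }
};

struct CountingVisitor {
  std::size_t visited = 0;
  void VisitPointer(void** /*slot*/) { visited++; }
};

template <typename Visitor>
static void SweepPage(std::vector<LiveObject>* live, SweepingMode mode,
                      Visitor* v) {
  for (LiveObject& obj : *live) {
    // (returning the dead ranges between live objects to the free list is omitted)
    if (mode == SWEEP_AND_VISIT_LIVE_OBJECTS) obj.IterateBody(v);
  }
}

int main() {
  std::vector<LiveObject> page(2);
  page[0].tagged_fields.resize(3);        // an object with three pointer fields
  page[1].raw_payload.assign(16, '\0');   // a "code-like" object: payload only
  CountingVisitor v;
  SweepPage(&page, SWEEP_AND_VISIT_LIVE_OBJECTS, &v);
  std::printf("visited %zu pointer slots\n", v.visited);  // prints 3
  return 0;
}
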
(...skipping 22 matching lines...)
2684 heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE); 2769 heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
2685 LiveObjectList::IterateElements(&updating_visitor); 2770 LiveObjectList::IterateElements(&updating_visitor);
2686 2771
2687 { 2772 {
2688 StoreBufferRebuildScope scope(heap_, 2773 StoreBufferRebuildScope scope(heap_,
2689 heap_->store_buffer(), 2774 heap_->store_buffer(),
2690 &Heap::ScavengeStoreBufferCallback); 2775 &Heap::ScavengeStoreBufferCallback);
2691 heap_->store_buffer()->IteratePointersToNewSpace(&UpdatePointer); 2776 heap_->store_buffer()->IteratePointersToNewSpace(&UpdatePointer);
2692 } 2777 }
2693 2778
2694 SlotsBuffer::UpdateSlotsRecordedIn(migration_slots_buffer_); 2779 SlotsBuffer::UpdateSlotsRecordedIn(heap_, migration_slots_buffer_);
2695 if (FLAG_trace_fragmentation) { 2780 if (FLAG_trace_fragmentation) {
2696 PrintF(" migration slots buffer: %d\n", 2781 PrintF(" migration slots buffer: %d\n",
2697 SlotsBuffer::SizeOfChain(migration_slots_buffer_)); 2782 SlotsBuffer::SizeOfChain(migration_slots_buffer_));
2698 } 2783 }
2699 2784
2700 int npages = evacuation_candidates_.length(); 2785 int npages = evacuation_candidates_.length();
2701 for (int i = 0; i < npages; i++) { 2786 for (int i = 0; i < npages; i++) {
2702 Page* p = evacuation_candidates_[i]; 2787 Page* p = evacuation_candidates_[i];
2703 ASSERT(p->IsEvacuationCandidate() || 2788 ASSERT(p->IsEvacuationCandidate() ||
2704 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); 2789 p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
2705 2790
2706 if (p->IsEvacuationCandidate()) { 2791 if (p->IsEvacuationCandidate()) {
2707 SlotsBuffer::UpdateSlotsRecordedIn(p->slots_buffer()); 2792 SlotsBuffer::UpdateSlotsRecordedIn(heap_, p->slots_buffer());
2708 if (FLAG_trace_fragmentation) { 2793 if (FLAG_trace_fragmentation) {
2709 PrintF(" page %p slots buffer: %d\n", 2794 PrintF(" page %p slots buffer: %d\n",
2710 reinterpret_cast<void*>(p), 2795 reinterpret_cast<void*>(p),
2711 SlotsBuffer::SizeOfChain(p->slots_buffer())); 2796 SlotsBuffer::SizeOfChain(p->slots_buffer()));
2712 } 2797 }
2713 } else { 2798 } else {
2714 if (FLAG_gc_verbose) { 2799 if (FLAG_gc_verbose) {
2715 PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n", 2800 PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n",
2716 reinterpret_cast<intptr_t>(p)); 2801 reinterpret_cast<intptr_t>(p));
2717 } 2802 }
2718 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); 2803 PagedSpace* space = static_cast<PagedSpace*>(p->owner());
2719 p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION); 2804 p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
2720 SweepPrecisely(space, p, SWEEP_AND_UPDATE_SLOTS); 2805
2806 SweepPrecisely(space,
2807 p,
2808 SWEEP_AND_VISIT_LIVE_OBJECTS,
2809 &updating_visitor);
2721 } 2810 }
2722 } 2811 }
2723 2812
2724 // Update pointers from cells. 2813 // Update pointers from cells.
2725 HeapObjectIterator cell_iterator(heap_->cell_space()); 2814 HeapObjectIterator cell_iterator(heap_->cell_space());
2726 for (HeapObject* cell = cell_iterator.Next(); 2815 for (HeapObject* cell = cell_iterator.Next();
2727 cell != NULL; 2816 cell != NULL;
2728 cell = cell_iterator.Next()) { 2817 cell = cell_iterator.Next()) {
2729 if (cell->IsJSGlobalPropertyCell()) { 2818 if (cell->IsJSGlobalPropertyCell()) {
2730 Address value_address = 2819 Address value_address =
(...skipping 500 matching lines...)
3231 } 3320 }
3232 case LAZY_CONSERVATIVE: { 3321 case LAZY_CONSERVATIVE: {
3233 freed_bytes += SweepConservatively(space, p); 3322 freed_bytes += SweepConservatively(space, p);
3234 if (freed_bytes >= newspace_size && p != space->LastPage()) { 3323 if (freed_bytes >= newspace_size && p != space->LastPage()) {
3235 space->SetPagesToSweep(p->next_page(), space->LastPage()); 3324 space->SetPagesToSweep(p->next_page(), space->LastPage());
3236 lazy_sweeping_active = true; 3325 lazy_sweeping_active = true;
3237 } 3326 }
3238 break; 3327 break;
3239 } 3328 }
3240 case PRECISE: { 3329 case PRECISE: {
3241 SweepPrecisely(space, p, SWEEP_ONLY); 3330 SweepPrecisely(space, p, SWEEP_ONLY, NULL);
3242 break; 3331 break;
3243 } 3332 }
3244 default: { 3333 default: {
3245 UNREACHABLE(); 3334 UNREACHABLE();
3246 } 3335 }
3247 } 3336 }
3248 } 3337 }
3249 } 3338 }
3250 3339
3251 3340
(...skipping 55 matching lines...)
3307 PROFILE(isolate, CodeDeleteEvent(obj->address())); 3396 PROFILE(isolate, CodeDeleteEvent(obj->address()));
3308 } 3397 }
3309 } 3398 }
3310 3399
3311 3400
3312 void MarkCompactCollector::Initialize() { 3401 void MarkCompactCollector::Initialize() {
3313 StaticMarkingVisitor::Initialize(); 3402 StaticMarkingVisitor::Initialize();
3314 } 3403 }
3315 3404
3316 3405
3317 void SlotsBuffer::UpdateSlots() { 3406 bool SlotsBuffer::IsTypedSlot(ObjectSlot slot) {
3318 for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) { 3407 return reinterpret_cast<uintptr_t>(slot) < NUMBER_OF_SLOT_TYPES;
3319 UpdateSlot(slots_[slot_idx]); 3408 }
3409
3410
3411 bool SlotsBuffer::AddTo(SlotsBufferAllocator* allocator,
3412 SlotsBuffer** buffer_address,
3413 SlotType type,
3414 Address addr,
3415 AdditionMode mode) {
3416 if(!AddTo(allocator,
3417 buffer_address,
3418 reinterpret_cast<ObjectSlot>(type),
3419 mode)) {
3420 return false;
3421 }
3422 return AddTo(allocator,
3423 buffer_address,
3424 reinterpret_cast<ObjectSlot>(addr),
3425 mode);
3426 }
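
Illustration (not from the patch): this overload stores a typed slot as two consecutive buffer entries, the type first (smuggled in as a small fake pointer, which is why IsTypedSlot can test against NUMBER_OF_SLOT_TYPES) and the raw address second. A standalone sketch of the writer side of that encoding; the names and the assumption that real slot addresses are never that small are mine:

#include <cstdint>
#include <cstdio>
#include <vector>

// Mirrors the idea behind IsTypedSlot: a buffer entry whose numeric value is
// smaller than NUMBER_OF_SLOT_TYPES is a type marker for the entry after it.
enum SlotType : uintptr_t {
  CODE_TARGET_SLOT,
  CODE_ENTRY_SLOT,
  RELOCATED_CODE_OBJECT,
  NUMBER_OF_SLOT_TYPES
};

typedef uintptr_t Entry;

static bool IsTypedSlot(Entry e) { return e < NUMBER_OF_SLOT_TYPES; }

static void AddUntyped(std::vector<Entry>* buf, void* slot) {
  buf->push_back(reinterpret_cast<Entry>(slot));
}

static void AddTyped(std::vector<Entry>* buf, SlotType type, uintptr_t addr) {
  buf->push_back(static_cast<Entry>(type));  // marker entry
  buf->push_back(addr);                      // payload entry: the slot's address
}

int main() {
  std::vector<Entry> buffer;
  int dummy_field = 0;
  AddUntyped(&buffer, &dummy_field);           // an ordinary Object**-style slot
  AddTyped(&buffer, CODE_ENTRY_SLOT, 0x1234);  // stored as two entries
  for (Entry e : buffer) {
    std::printf("%s entry: %#lx\n",
                IsTypedSlot(e) ? "type-marker" : "address",
                static_cast<unsigned long>(e));
  }
  return 0;
}
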
3427
3428
3429 static inline SlotsBuffer::SlotType SlotTypeForRMode(RelocInfo::Mode rmode) {
3430 if (RelocInfo::IsCodeTarget(rmode)) {
3431 return SlotsBuffer::CODE_TARGET_SLOT;
3432 } else if (RelocInfo::IsDebugBreakSlot(rmode)) {
3433 return SlotsBuffer::DEBUG_TARGET_SLOT;
3434 } else if (RelocInfo::IsJSReturn(rmode)) {
3435 return SlotsBuffer::JS_RETURN_SLOT;
3436 }
3437 UNREACHABLE();
3438 return SlotsBuffer::NONE;
3439 }
3440
3441
3442 void MarkCompactCollector::RecordRelocSlot(RelocInfo* rinfo, Code* target) {
3443 Page* target_page = Page::FromAddress(
3444 reinterpret_cast<Address>(target));
3445 if (target_page->IsEvacuationCandidate() &&
3446 !ShouldSkipEvacuationSlotRecording(rinfo->host())) {
3447 if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
3448 target_page->slots_buffer_address(),
3449 SlotTypeForRMode(rinfo->rmode()),
3450 rinfo->pc(),
3451 SlotsBuffer::FAIL_ON_OVERFLOW)) {
3452 EvictEvacuationCandidate(target_page);
3453 }
3320 } 3454 }
3321 } 3455 }
3322 3456
3323 3457
3458 void MarkCompactCollector::RecordCodeEntrySlot(Address slot, Code* target) {
3459 Page* target_page = Page::FromAddress(
3460 reinterpret_cast<Address>(target));
3461 if (target_page->IsEvacuationCandidate() &&
3462 !ShouldSkipEvacuationSlotRecording(reinterpret_cast<Object**>(slot))) {
3463 if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
3464 target_page->slots_buffer_address(),
3465 SlotsBuffer::CODE_ENTRY_SLOT,
3466 slot,
3467 SlotsBuffer::FAIL_ON_OVERFLOW)) {
3468 EvictEvacuationCandidate(target_page);
3469 }
3470 }
3471 }
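
Illustration (not from the patch): both recording functions use FAIL_ON_OVERFLOW, so when a slot cannot be queued the collector abandons compaction of that one target page (EvictEvacuationCandidate) instead of failing the whole cycle; an unrecorded slot is then harmless because the page will not move. A minimal sketch of that record-or-evict fallback, with invented types and an arbitrary buffer capacity:

#include <cstddef>
#include <vector>

// Invented stand-ins for a candidate page and its bounded slots buffer.
struct CandidatePage {
  static const std::size_t kCapacity = 4;  // pretend the buffer cannot grow further
  bool evacuation_candidate = true;
  std::vector<void**> recorded_slots;      // slots that point into this page
};

// Plays the role of SlotsBuffer::AddTo with FAIL_ON_OVERFLOW: report failure
// instead of growing without bound.
static bool AddToBuffer(CandidatePage* page, void** slot) {
  if (page->recorded_slots.size() >= CandidatePage::kCapacity) return false;
  page->recorded_slots.push_back(slot);
  return true;
}

// Plays the role of RecordCodeEntrySlot's fallback: if recording fails, stop
// treating the page as an evacuation candidate, so the missed slot never has
// to be rewritten.
static void RecordSlot(CandidatePage* target_page, void** slot) {
  if (!target_page->evacuation_candidate) return;
  if (!AddToBuffer(target_page, slot)) {
    target_page->evacuation_candidate = false;  // EvictEvacuationCandidate
    target_page->recorded_slots.clear();
  }
}

int main() {
  CandidatePage page;
  void* objects[8] = {};
  for (void*& obj : objects) RecordSlot(&page, &obj);
  return page.evacuation_candidate ? 1 : 0;  // expect eviction after overflow
}
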
3472
3473
3474 static inline SlotsBuffer::SlotType DecodeSlotType(
3475 SlotsBuffer::ObjectSlot slot) {
3476 return static_cast<SlotsBuffer::SlotType>(reinterpret_cast<intptr_t>(slot));
3477 }
3478
3479
3480 SlotsBuffer::SlotType SlotsBuffer::UpdateSlots(
Erik Corry 2011/09/06 14:10:31 Somewhere there should be a comment on why this fu
Vyacheslav Egorov (Chromium) 2011/09/06 15:01:42 Done.
3481 Heap* heap,
3482 SlotsBuffer::SlotType pending) {
3483 PointersUpdatingVisitor v(heap);
3484
3485 if (pending != NONE) {
3486 UpdateSlot(&v, pending, reinterpret_cast<Address>(slots_[0]));
3487 }
3488
3489 for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
3490 ObjectSlot slot = slots_[slot_idx];
3491 if (!IsTypedSlot(slot)) {
3492 UpdateSlot(slot);
3493 } else {
3494 ++slot_idx;
3495 if (slot_idx < idx_) {
3496 UpdateSlot(&v,
3497 DecodeSlotType(slot),
3498 reinterpret_cast<Address>(slots_[slot_idx]));
3499 } else {
3500 return DecodeSlotType(slot);
3501 }
3502 }
3503 }
3504
3505 return SlotsBuffer::NONE;
3506 }
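
Illustration (not from the patch), re the review question above: a typed slot occupies two consecutive entries, and the boundary between two chained buffers can fall between the type marker and its address. This appears to be why UpdateSlots takes and returns a SlotType: a dangling type at the end of one buffer is handed back to the caller and passed in as `pending` for the next buffer, whose first entry is the matching address. A standalone sketch of that hand-off, with invented buffer sizes and names:

#include <cstdint>
#include <cstdio>

// Invented miniature of the slots-buffer chain: fixed-capacity buffers, where
// a typed slot is a (type marker, address) pair that can straddle two buffers
// when the first one fills up between the two entries.
enum SlotType : uintptr_t { NONE = 0, CODE_TARGET_SLOT = 1, NUMBER_OF_SLOT_TYPES = 2 };

struct Buffer {
  static const int kCapacity = 2;
  uintptr_t entries[kCapacity];
  int count;
};

static bool IsTypedSlot(uintptr_t e) { return e < NUMBER_OF_SLOT_TYPES; }

// Processes one buffer. `pending` is a type whose address half is this
// buffer's first entry (the marker was the last entry of the previous buffer).
// Returns a dangling type if this buffer ends the same way, NONE otherwise.
static SlotType UpdateSlots(const Buffer* b, SlotType pending) {
  int i = 0;
  if (pending != NONE) {
    std::printf("typed slot (carried over): type=%lu addr=%#lx\n",
                static_cast<unsigned long>(pending),
                static_cast<unsigned long>(b->entries[0]));
    i = 1;
  }
  for (; i < b->count; i++) {
    if (IsTypedSlot(b->entries[i])) {
      if (i + 1 == b->count) return static_cast<SlotType>(b->entries[i]);
      std::printf("typed slot: type=%lu addr=%#lx\n",
                  static_cast<unsigned long>(b->entries[i]),
                  static_cast<unsigned long>(b->entries[i + 1]));
      i++;
    } else {
      std::printf("plain object slot at %#lx\n",
                  static_cast<unsigned long>(b->entries[i]));
    }
  }
  return NONE;
}

int main() {
  Buffer older = {{0x5000, CODE_TARGET_SLOT}, 2};  // full: marker is the last entry
  Buffer newer = {{0x6000, 0x7000}, 2};            // starts with the marker's address
  SlotType pending = UpdateSlots(&older, NONE);    // returns CODE_TARGET_SLOT
  pending = UpdateSlots(&newer, pending);          // consumes it, then continues
  return pending == NONE ? 0 : 1;
}
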
3507
3508
3324 SlotsBuffer* SlotsBufferAllocator::AllocateBuffer(SlotsBuffer* next_buffer) { 3509 SlotsBuffer* SlotsBufferAllocator::AllocateBuffer(SlotsBuffer* next_buffer) {
3325 return new SlotsBuffer(next_buffer); 3510 return new SlotsBuffer(next_buffer);
3326 } 3511 }
3327 3512
3328 3513
3329 void SlotsBufferAllocator::DeallocateBuffer(SlotsBuffer* buffer) { 3514 void SlotsBufferAllocator::DeallocateBuffer(SlotsBuffer* buffer) {
3330 delete buffer; 3515 delete buffer;
3331 } 3516 }
3332 3517
3333 3518
3334 void SlotsBufferAllocator::DeallocateChain(SlotsBuffer** buffer_address) { 3519 void SlotsBufferAllocator::DeallocateChain(SlotsBuffer** buffer_address) {
3335 SlotsBuffer* buffer = *buffer_address; 3520 SlotsBuffer* buffer = *buffer_address;
3336 while (buffer != NULL) { 3521 while (buffer != NULL) {
3337 SlotsBuffer* next_buffer = buffer->next(); 3522 SlotsBuffer* next_buffer = buffer->next();
3338 DeallocateBuffer(buffer); 3523 DeallocateBuffer(buffer);
3339 buffer = next_buffer; 3524 buffer = next_buffer;
3340 } 3525 }
3341 *buffer_address = NULL; 3526 *buffer_address = NULL;
3342 } 3527 }
3343 3528
3344 3529
3345 } } // namespace v8::internal 3530 } } // namespace v8::internal