Chromium Code Reviews

Unified Diff: src/mark-compact.cc

Issue 8099: Collects unused maps that are only kept alive by map transitions.... (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: '' Created 12 years, 1 month ago
 // Copyright 2006-2008 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 60 matching lines...)
   // Rather than passing the tracer around we stash it in a static member
   // variable.
   tracer_ = tracer;
   Prepare();
   // Prepare has selected whether to compact the old generation or not.
   // Tell the tracer.
   if (IsCompacting()) tracer_->set_is_compacting();
 
   MarkLiveObjects();
 
+  if (FLAG_collect_maps) ClearNonLiveTransitions();
+
   SweepLargeObjectSpace();
 
   if (compacting_collection_) {
     EncodeForwardingAddresses();
 
     UpdatePointers();
 
     RelocateObjects();
 
     RebuildRSets();
(...skipping 37 matching lines...)
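Note: the new ClearNonLiveTransitions call runs after marking and before sweeping. Below is a runnable toy driver that just prints the phase order of a compacting cycle after this patch; only the phase names come from the code above, the driver itself is an illustration, not V8's control flow.

// Simplified phase order of a compacting GC cycle after this patch.
#include <cstdio>

static void Phase(const char* name) { std::printf("%s\n", name); }

int main() {
  bool compacting = true, collect_maps = true;  // assumed flag values
  Phase("MarkLiveObjects");
  if (collect_maps) Phase("ClearNonLiveTransitions");  // new phase in this patch
  Phase("SweepLargeObjectSpace");
  if (compacting) {
    Phase("EncodeForwardingAddresses");
    Phase("UpdatePointers");
    Phase("RelocateObjects");
    Phase("RebuildRSets");
  }
  return 0;
}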
       old_gen_used += space->Size();
     }
     int old_gen_fragmentation =
         static_cast<int>((old_gen_recoverable * 100.0) / old_gen_used);
     if (old_gen_fragmentation > kFragmentationLimit) {
       compacting_collection_ = true;
     }
   }
 
   if (FLAG_never_compact) compacting_collection_ = false;
+  if (FLAG_collect_maps) CreateBackPointers();
 
 #ifdef DEBUG
   if (compacting_collection_) {
     // We will write bookkeeping information to the remembered set area
     // starting now.
     Page::set_rset_state(Page::NOT_IN_USE);
   }
 #endif
 
   PagedSpaces spaces;
(...skipping 167 matching lines...)
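Note: a minimal standalone sketch of the compaction trigger computed above. The sizes and the kFragmentationLimit value are invented for illustration (the real limit is defined elsewhere in the collector); only the arithmetic mirrors the code.

// Standalone sketch of the compaction trigger. Sizes and limit are assumed.
#include <cstdio>

static const int kFragmentationLimit = 15;  // percent; illustrative value only

int main() {
  long old_gen_recoverable = 4 * 1024 * 1024;   // bytes reclaimable (assumed)
  long old_gen_used = 20 * 1024 * 1024;         // bytes currently used (assumed)
  int old_gen_fragmentation =
      static_cast<int>((old_gen_recoverable * 100.0) / old_gen_used);
  // 4 MB recoverable out of 20 MB used -> 20% > 15%, so this cycle compacts.
  std::printf("fragmentation = %d%% -> compacting = %s\n",
              old_gen_fragmentation,
              old_gen_fragmentation > kFragmentationLimit ? "true" : "false");
  return 0;
}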
 
   // Retrieves the Code pointer from derived code entry.
   Code* CodeFromDerivedPointer(Address addr) {
     ASSERT(addr != NULL);
     return reinterpret_cast<Code*>(
         HeapObject::FromAddress(addr - Code::kHeaderSize));
   }
 
   // Visit an unmarked object.
   void VisitUnmarkedObject(HeapObject* obj) {
+#ifdef DEBUG
     ASSERT(Heap::Contains(obj));
-#ifdef DEBUG
     MarkCompactCollector::UpdateLiveObjectCount(obj);
+    ASSERT(!obj->IsMarked());
+    // ASSERT(!obj->IsMap());  // Some maps are processed here.
+    // Their map transitions will be followed.  If we do a test
+    // here to treat maps separately, will there be a performance impact?
 #endif
     Map* map = obj->map();
     obj->SetMark();
     MarkCompactCollector::tracer()->increment_marked_count();
     // Mark the map pointer and the body.
     MarkCompactCollector::MarkObject(map);
     obj->IterateBody(map->instance_type(), obj->SizeFromMap(map), this);
   }
 
   // Visit all unmarked objects pointed to by [start, end).
(...skipping 80 matching lines...)
 };
 
 
 void MarkCompactCollector::MarkUnmarkedObject(HeapObject* object) {
 #ifdef DEBUG
   UpdateLiveObjectCount(object);
 #endif
   ASSERT(!object->IsMarked());
   if (object->IsJSGlobalObject()) Counters::global_objects.Increment();
 
-  if (FLAG_cleanup_caches_in_maps_at_gc && object->IsMap()) {
-    Map::cast(object)->ClearCodeCache();
-  }
-
-  object->SetMark();
   tracer_->increment_marked_count();
   ASSERT(Heap::Contains(object));
-  marking_stack.Push(object);
+  if (object->IsMap()) {
+    if (FLAG_cleanup_caches_in_maps_at_gc) {
+      Map::cast(object)->ClearCodeCache();
+    }
+    object->SetMark();
+    if (FLAG_collect_maps) {
+      MarkMapContents(reinterpret_cast<Map*>(object));
+    } else {
+      marking_stack.Push(object);
+    }
+  } else {
+    object->SetMark();
+    marking_stack.Push(object);
+  }
+}
+
+
+void MarkCompactCollector::MarkMapContents(Map* map) {
+  MarkDescriptorArray(reinterpret_cast<DescriptorArray*>(
+      HeapObject::RawField(map, Map::kInstanceDescriptorsOffset)));
+
+  // Mark the Object* fields of the Map.
+  // Since the descriptor array has been marked already, it is fine
+  // that one of these fields contains a pointer to it.
+  MarkingVisitor visitor;  // Has no state or contents.
+  visitor.VisitPointers(&HeapObject::RawField(map, Map::kPrototypeOffset),
+                        &HeapObject::RawField(map, Map::kSize));
+}
+
+
+void MarkCompactCollector::MarkDescriptorArray(
+    DescriptorArray* descriptors) {
+  if (descriptors->IsMarked()) return;
+  // The empty descriptor array is marked as a root before any maps are marked.
+  ASSERT(descriptors != Heap::empty_descriptor_array());
+
+  tracer_->increment_marked_count();
+#ifdef DEBUG
+  UpdateLiveObjectCount(descriptors);
+#endif
+  descriptors->SetMark();
+
+  FixedArray* contents = reinterpret_cast<FixedArray*>(
+      descriptors->get(DescriptorArray::kContentArrayIndex));
+  ASSERT(contents->IsHeapObject());
+  ASSERT(!contents->IsMarked());
+  ASSERT(contents->IsFixedArray());
+  ASSERT(contents->length() >= 2);
+  tracer_->increment_marked_count();
+#ifdef DEBUG
+  UpdateLiveObjectCount(contents);
+#endif
+  contents->SetMark();
+  // Contents contains (value, details) pairs.  If the details say that the
+  // type of descriptor is MAP_TRANSITION, CONSTANT_TRANSITION, or
+  // NULL_DESCRIPTOR, we don't mark the value as live.  Only for type
+  // MAP_TRANSITION is the value an Object* (a Map*).
+  for (int i = 0; i < contents->length(); i += 2) {
+    // If the pair (value, details) at index i, i+1 is not
+    // a transition or null descriptor, mark the value.
+    PropertyDetails details(Smi::cast(contents->get(i + 1)));
+    if (details.type() < FIRST_PHANTOM_PROPERTY_TYPE) {
+      HeapObject* object = reinterpret_cast<HeapObject*>(contents->get(i));
+      if (object->IsHeapObject() && !object->IsMarked()) {
+        tracer_->increment_marked_count();
+#ifdef DEBUG
+        UpdateLiveObjectCount(object);
+#endif
+        object->SetMark();
+        marking_stack.Push(object);
+      }
+    }
+  }
+  // The DescriptorArray descriptors contains a pointer to its contents array,
+  // but the contents array is already marked.
+  marking_stack.Push(descriptors);
+}
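Note: a toy model of the (value, details) pair scan above. The enum values and the phantom threshold below are invented stand-ins for V8's PropertyType, not the real definitions; only the "skip phantom descriptors" logic mirrors the patch.

// Toy model of the (value, details) scan in MarkDescriptorArray.
#include <cstdio>
#include <vector>

enum ToyPropertyType { NORMAL, FIELD, CONSTANT_FUNCTION,   // values kept live
                       MAP_TRANSITION, CONSTANT_TRANSITION, NULL_DESCRIPTOR };
const ToyPropertyType FIRST_PHANTOM_TYPE = MAP_TRANSITION;  // assumed boundary

struct ToyDescriptor { const char* value; ToyPropertyType type; };

int main() {
  std::vector<ToyDescriptor> contents = {
      {"getter code", NORMAL},
      {"transition target map", MAP_TRANSITION},  // phantom: not marked here
      {"field info", FIELD},
  };
  for (const ToyDescriptor& d : contents) {
    if (d.type < FIRST_PHANTOM_TYPE) {
      std::printf("mark: %s\n", d.value);  // kept alive by the descriptor
    } else {
      std::printf("skip: %s\n", d.value);  // only live via a live target map
    }
  }
  return 0;
}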
+
+
+void MarkCompactCollector::CreateBackPointers() {
+  HeapObjectIterator iterator(Heap::map_space());
+  while (iterator.has_next()) {
+    Object* next_object = iterator.next();
+    if (next_object->IsMap()) {  // Could also be ByteArray on free list.
+      Map* map = Map::cast(next_object);
+      if (map->instance_type() >= FIRST_JS_OBJECT_TYPE &&
+          map->instance_type() <= LAST_JS_OBJECT_TYPE) {
+        map->CreateBackPointers();
+      }
+    }
+  }
 }
 
 
 static int OverflowObjectSize(HeapObject* obj) {
   // Recover the normal map pointer; it might be marked as live and
   // overflowed.
   MapWord map_word = obj->map_word();
   map_word.ClearMark();
   map_word.ClearOverflow();
   return obj->SizeFromMap(map_word.ToMap());
(...skipping 222 matching lines...)
 
 #ifdef DEBUG
   if (FLAG_verify_global_gc) VerifyHeapAfterMarkingPhase();
 #endif
 
   // Remove object groups after marking phase.
   GlobalHandles::RemoveObjectGroups();
 }
 
 
+static int CountMarkedCallback(HeapObject* obj) {
+  MapWord map_word = obj->map_word();
+  map_word.ClearMark();
+  return obj->SizeFromMap(map_word.ToMap());
+}
+
+
 #ifdef DEBUG
 void MarkCompactCollector::UpdateLiveObjectCount(HeapObject* obj) {
   live_bytes_ += obj->Size();
   if (Heap::new_space()->Contains(obj)) {
     live_young_objects_++;
   } else if (Heap::map_space()->Contains(obj)) {
     ASSERT(obj->IsMap());
     live_map_objects_++;
   } else if (Heap::old_pointer_space()->Contains(obj)) {
     live_old_pointer_objects_++;
   } else if (Heap::old_data_space()->Contains(obj)) {
     live_old_data_objects_++;
   } else if (Heap::code_space()->Contains(obj)) {
     live_code_objects_++;
   } else if (Heap::lo_space()->Contains(obj)) {
     live_lo_objects_++;
   } else {
     UNREACHABLE();
   }
 }
 
 
-static int CountMarkedCallback(HeapObject* obj) {
-  MapWord map_word = obj->map_word();
-  map_word.ClearMark();
-  return obj->SizeFromMap(map_word.ToMap());
-}
-
-
 void MarkCompactCollector::VerifyHeapAfterMarkingPhase() {
   Heap::new_space()->Verify();
   Heap::old_pointer_space()->Verify();
   Heap::old_data_space()->Verify();
   Heap::code_space()->Verify();
   Heap::map_space()->Verify();
 
   int live_objects;
 
 #define CHECK_LIVE_OBJECTS(it, expected) \
(...skipping 31 matching lines...)
 void MarkCompactCollector::SweepLargeObjectSpace() {
 #ifdef DEBUG
   ASSERT(state_ == MARK_LIVE_OBJECTS);
   state_ =
       compacting_collection_ ? ENCODE_FORWARDING_ADDRESSES : SWEEP_SPACES;
 #endif
   // Deallocate unmarked objects and clear marked bits for marked objects.
   Heap::lo_space()->FreeUnmarkedObjects();
 }
 
+// Safe to use during marking phase only.
+bool MarkCompactCollector::SafeIsMap(HeapObject* object) {
+  MapWord metamap = object->map_word();
+  metamap.ClearMark();
+  return metamap.ToMap()->instance_type() == MAP_TYPE;
+}
+
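Note: SafeIsMap is needed because, during marking, the collector steals bits in an object's map word, so the raw word cannot be used as a pointer until the mark is cleared. A toy illustration with an invented bit layout (V8's real MapWord encoding differs):

// Toy map word: a pointer-sized word whose low bit doubles as the mark bit
// during GC. The bit choice is illustrative only.
#include <cassert>
#include <cstdint>

const uintptr_t kToyMarkBit = 1;  // assumed mark bit; pointers are aligned

struct ToyMapWord {
  uintptr_t value;
  bool IsMarked() const { return (value & kToyMarkBit) != 0; }
  void SetMark() { value |= kToyMarkBit; }
  void ClearMark() { value &= ~kToyMarkBit; }
  void* ToMap() const {
    assert(!IsMarked());  // must clear the mark before using as a pointer
    return reinterpret_cast<void*>(value);
  }
};

int main() {
  int dummy_map;  // stand-in for a Map object
  ToyMapWord w = {reinterpret_cast<uintptr_t>(&dummy_map)};
  w.SetMark();
  w.ClearMark();  // required before ToMap(), as in SafeIsMap above
  return w.ToMap() == &dummy_map ? 0 : 1;
}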
+void MarkCompactCollector::ClearNonLiveTransitions() {
+  HeapObjectIterator map_iterator(Heap::map_space(), &CountMarkedCallback);
+  // Iterate over the map space, setting map transitions that go from
+  // a marked map to an unmarked map to null transitions.  At the same time,
+  // set all the prototype fields of maps back to their original value,
+  // dropping the back pointers temporarily stored in the prototype field.
+  // Setting the prototype field requires following the linked list of
+  // back pointers, reversing them all at once.  This allows us to find
+  // those maps with map transitions that need to be nulled, and only
+  // scan the descriptor arrays of those maps, not all maps.
+  // All of these actions are carried out only on maps of JSObjects
+  // and related subtypes.
+  while (map_iterator.has_next()) {
+    Map* map = reinterpret_cast<Map*>(map_iterator.next());
+    if (!map->IsMarked() && map->IsByteArray()) continue;
+
+    ASSERT(SafeIsMap(map));
+    // Only JSObject and subtypes have map transitions and back pointers.
+    if (map->instance_type() < FIRST_JS_OBJECT_TYPE) continue;
+    if (map->instance_type() > LAST_JS_OBJECT_TYPE) continue;
+    // Follow the chain of back pointers to find the prototype.
+    Map* current = map;
+    while (SafeIsMap(current)) {
+      current = reinterpret_cast<Map*>(current->prototype());
+      ASSERT(current->IsHeapObject());
+    }
+    Object* real_prototype = current;
+
+    // Follow back pointers, setting them to prototype,
+    // clearing map transitions when necessary.
+    current = map;
+    bool on_dead_path = !current->IsMarked();
+    Object* next;
+    while (SafeIsMap(current)) {
+      next = current->prototype();
+      // There should never be a dead map above a live map.
+      ASSERT(on_dead_path || current->IsMarked());
+
+      // A live map above a dead map indicates a dead transition.
+      // This test will always be false on the first iteration.
+      if (on_dead_path && current->IsMarked()) {
+        on_dead_path = false;
+        current->ClearNonLiveTransitions(real_prototype);
+      }
+      HeapObject::RawField(current, Map::kPrototypeOffset) =
+          real_prototype;
+      current = reinterpret_cast<Map*>(next);
+    }
+  }
+}
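Note: the back-pointer walk above is easier to see on a toy structure. The sketch below models maps whose prototype slot temporarily holds a back pointer, restores the real prototype along the chain, and reports where a live-to-dead transition would be cleared. All names and the liveness pattern are invented for illustration.

// Toy model of the back-pointer walk in ClearNonLiveTransitions.
#include <cstdio>

struct ToyMap {
  const char* name;
  ToyMap* prototype_slot;  // holds a back pointer during GC, else prototype
  bool is_map;             // the chain ends at a non-map: the real prototype
  bool marked;
};

int main() {
  ToyMap real_proto = {"Object.prototype", nullptr, false, true};
  // Transition chain root -> child -> grandchild, linked via back pointers.
  ToyMap root       = {"root", &real_proto, true, true};    // live
  ToyMap child      = {"child", &root, true, true};         // live
  ToyMap grandchild = {"grandchild", &child, true, false};  // dead

  ToyMap* current = &grandchild;        // start at some map, as the loop does
  bool on_dead_path = !current->marked;
  while (current->is_map) {
    ToyMap* next = current->prototype_slot;
    // First live map above a dead path owns the transition to be cleared.
    if (on_dead_path && current->marked) {
      on_dead_path = false;
      std::printf("clear dead transition out of live map %s\n", current->name);
    }
    current->prototype_slot = &real_proto;  // restore the real prototype
    current = next;
  }
  return 0;
}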
 
 // -------------------------------------------------------------------------
 // Phase 2: Encode forwarding addresses.
 // When compacting, forwarding addresses for objects in old space and map
 // space are encoded in their map pointer word (along with an encoding of
 // their map pointers).
 //
 //  31               21 20               10 9                0
 // +-----------------+------------------+-----------------+
 // |forwarding offset|page offset of map|page index of map|
(...skipping 999 matching lines...)
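Note: a hedged sketch of the 32-bit packing diagrammed above. The field widths (11 + 11 + 10 bits) follow the comment, but the helper names and the use of a plain uint32_t are invented; this is not V8's actual MapWord code.

// Sketch of the forwarding-address encoding described in the Phase 2 comment.
#include <cassert>
#include <cstdint>

const int kMapPageIndexBits = 10;      // bits 9..0
const int kMapPageOffsetBits = 11;     // bits 20..10
const int kForwardingOffsetBits = 11;  // bits 31..21

uint32_t EncodeForwarding(uint32_t forwarding_offset,
                          uint32_t map_page_offset,
                          uint32_t map_page_index) {
  assert(forwarding_offset < (1u << kForwardingOffsetBits));
  assert(map_page_offset < (1u << kMapPageOffsetBits));
  assert(map_page_index < (1u << kMapPageIndexBits));
  return (forwarding_offset << (kMapPageOffsetBits + kMapPageIndexBits)) |
         (map_page_offset << kMapPageIndexBits) |
         map_page_index;
}

uint32_t ForwardingOffset(uint32_t word) {
  return word >> (kMapPageOffsetBits + kMapPageIndexBits);
}

int main() {
  uint32_t w = EncodeForwarding(3, 5, 7);
  return ForwardingOffset(w) == 3 ? 0 : 1;  // round-trips the top field
}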
 
 void MarkCompactCollector::RebuildRSets() {
 #ifdef DEBUG
   ASSERT(state_ == RELOCATE_OBJECTS);
   state_ = REBUILD_RSETS;
 #endif
   Heap::RebuildRSets();
 }
 
 } }  // namespace v8::internal