OLD | NEW |
1 // Copyright 2006-2008 the V8 project authors. All rights reserved. | 1 // Copyright 2006-2008 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 66 matching lines...)
77 | 77 |
78 SweepLargeObjectSpace(); | 78 SweepLargeObjectSpace(); |
79 | 79 |
80 if (IsCompacting()) { | 80 if (IsCompacting()) { |
81 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_COMPACT); | 81 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_COMPACT); |
82 EncodeForwardingAddresses(); | 82 EncodeForwardingAddresses(); |
83 | 83 |
84 UpdatePointers(); | 84 UpdatePointers(); |
85 | 85 |
86 RelocateObjects(); | 86 RelocateObjects(); |
87 | |
88 RebuildRSets(); | |
89 | |
90 } else { | 87 } else { |
91 SweepSpaces(); | 88 SweepSpaces(); |
92 } | 89 } |
93 | 90 |
94 Finish(); | 91 Finish(); |
95 | 92 |
96 // Save the count of marked objects remaining after the collection and | 93 // Save the count of marked objects remaining after the collection and |
97 // null out the GC tracer. | 94 // null out the GC tracer. |
98 previous_marked_count_ = tracer_->marked_count(); | 95 previous_marked_count_ = tracer_->marked_count(); |
99 ASSERT(previous_marked_count_ == 0); | 96 ASSERT(previous_marked_count_ == 0); |
(...skipping 14 matching lines...)
114 | 111 |
115 compacting_collection_ = | 112 compacting_collection_ = |
116 FLAG_always_compact || force_compaction_ || compact_on_next_gc_; | 113 FLAG_always_compact || force_compaction_ || compact_on_next_gc_; |
117 compact_on_next_gc_ = false; | 114 compact_on_next_gc_ = false; |
118 | 115 |
119 if (FLAG_never_compact) compacting_collection_ = false; | 116 if (FLAG_never_compact) compacting_collection_ = false; |
120 if (!Heap::map_space()->MapPointersEncodable()) | 117 if (!Heap::map_space()->MapPointersEncodable()) |
121 compacting_collection_ = false; | 118 compacting_collection_ = false; |
122 if (FLAG_collect_maps) CreateBackPointers(); | 119 if (FLAG_collect_maps) CreateBackPointers(); |
123 | 120 |
124 #ifdef DEBUG | |
125 if (compacting_collection_) { | |
126 // We will write bookkeeping information to the remembered set area | |
127 // starting now. | |
128 Page::set_rset_state(Page::NOT_IN_USE); | |
129 } | |
130 #endif | |
131 | |
132 PagedSpaces spaces; | 121 PagedSpaces spaces; |
133 for (PagedSpace* space = spaces.next(); | 122 for (PagedSpace* space = spaces.next(); |
134 space != NULL; space = spaces.next()) { | 123 space != NULL; space = spaces.next()) { |
135 space->PrepareForMarkCompact(compacting_collection_); | 124 space->PrepareForMarkCompact(compacting_collection_); |
136 } | 125 } |
137 | 126 |
138 #ifdef DEBUG | 127 #ifdef DEBUG |
139 live_bytes_ = 0; | 128 live_bytes_ = 0; |
140 live_young_objects_size_ = 0; | 129 live_young_objects_size_ = 0; |
141 live_old_pointer_objects_size_ = 0; | 130 live_old_pointer_objects_size_ = 0; |
142 live_old_data_objects_size_ = 0; | 131 live_old_data_objects_size_ = 0; |
143 live_code_objects_size_ = 0; | 132 live_code_objects_size_ = 0; |
144 live_map_objects_size_ = 0; | 133 live_map_objects_size_ = 0; |
145 live_cell_objects_size_ = 0; | 134 live_cell_objects_size_ = 0; |
146 live_lo_objects_size_ = 0; | 135 live_lo_objects_size_ = 0; |
147 #endif | 136 #endif |
148 } | 137 } |
149 | 138 |
150 | 139 |
151 void MarkCompactCollector::Finish() { | 140 void MarkCompactCollector::Finish() { |
152 #ifdef DEBUG | 141 #ifdef DEBUG |
153 ASSERT(state_ == SWEEP_SPACES || state_ == REBUILD_RSETS); | 142 ASSERT(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS); |
154 state_ = IDLE; | 143 state_ = IDLE; |
155 #endif | 144 #endif |
156 // The stub cache is not traversed during GC; clear the cache to | 145 // The stub cache is not traversed during GC; clear the cache to |
157 // force lazy re-initialization of it. This must be done after the | 146 // force lazy re-initialization of it. This must be done after the |
158 // GC, because it relies on the new address of certain old space | 147 // GC, because it relies on the new address of certain old space |
159 // objects (empty string, illegal builtin). | 148 // objects (empty string, illegal builtin). |
160 StubCache::Clear(); | 149 StubCache::Clear(); |
161 | 150 |
162 ExternalStringTable::CleanUp(); | 151 ExternalStringTable::CleanUp(); |
163 | 152 |
(...skipping 73 matching lines...)
237 map_word.ClearMark(); | 226 map_word.ClearMark(); |
238 InstanceType type = map_word.ToMap()->instance_type(); | 227 InstanceType type = map_word.ToMap()->instance_type(); |
239 if ((type & kShortcutTypeMask) != kShortcutTypeTag) return object; | 228 if ((type & kShortcutTypeMask) != kShortcutTypeTag) return object; |
240 | 229 |
241 Object* second = reinterpret_cast<ConsString*>(object)->unchecked_second(); | 230 Object* second = reinterpret_cast<ConsString*>(object)->unchecked_second(); |
242 if (second != Heap::raw_unchecked_empty_string()) { | 231 if (second != Heap::raw_unchecked_empty_string()) { |
243 return object; | 232 return object; |
244 } | 233 } |
245 | 234 |
246 // Since we don't have the object's start, it is impossible to update the | 235 // Since we don't have the object's start, it is impossible to update the |
247 // remembered set. Therefore, we only replace the string with its left | 236 // page dirty marks. Therefore, we only replace the string with its left |
248 // substring when the remembered set does not change. | 237 // substring when the page dirty marks do not change. |
249 Object* first = reinterpret_cast<ConsString*>(object)->unchecked_first(); | 238 Object* first = reinterpret_cast<ConsString*>(object)->unchecked_first(); |
250 if (!Heap::InNewSpace(object) && Heap::InNewSpace(first)) return object; | 239 if (!Heap::InNewSpace(object) && Heap::InNewSpace(first)) return object; |
251 | 240 |
252 *p = first; | 241 *p = first; |
253 return HeapObject::cast(first); | 242 return HeapObject::cast(first); |
254 } | 243 } |
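The dirty-marks wording above refers to the per-page region bookkeeping that this change introduces in place of remembered sets. As a minimal sketch of what recording a store into such a region could look like, with assumed names (kRegionSizeLog2 and MarkRegionDirty are illustrative, not the interface from this change):

    // Illustrative only: record an old-to-new store by dirtying the
    // region of the page that contains the written slot.
    void RecordOldToNewStoreSketch(Address slot) {
      Page* page = Page::FromAddress(slot);                     // page holding the slot
      uint32_t region = page->Offset(slot) >> kRegionSizeLog2;  // assumed region size
      page->MarkRegionDirty(region);                            // assumed bitmap setter
    }

ShortCircuitConsString bails out above precisely so that no such recording becomes necessary.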
255 | 244 |
256 | 245 |
257 // Helper class for marking pointers in HeapObjects. | 246 // Helper class for marking pointers in HeapObjects. |
258 class MarkingVisitor : public ObjectVisitor { | 247 class MarkingVisitor : public ObjectVisitor { |
(...skipping 510 matching lines...)
769 void MarkCompactCollector::SweepLargeObjectSpace() { | 758 void MarkCompactCollector::SweepLargeObjectSpace() { |
770 #ifdef DEBUG | 759 #ifdef DEBUG |
771 ASSERT(state_ == MARK_LIVE_OBJECTS); | 760 ASSERT(state_ == MARK_LIVE_OBJECTS); |
772 state_ = | 761 state_ = |
773 compacting_collection_ ? ENCODE_FORWARDING_ADDRESSES : SWEEP_SPACES; | 762 compacting_collection_ ? ENCODE_FORWARDING_ADDRESSES : SWEEP_SPACES; |
774 #endif | 763 #endif |
775 // Deallocate unmarked objects and clear marked bits for marked objects. | 764 // Deallocate unmarked objects and clear marked bits for marked objects. |
776 Heap::lo_space()->FreeUnmarkedObjects(); | 765 Heap::lo_space()->FreeUnmarkedObjects(); |
777 } | 766 } |
778 | 767 |
| 768 |
779 // Safe to use during marking phase only. | 769 // Safe to use during marking phase only. |
780 bool MarkCompactCollector::SafeIsMap(HeapObject* object) { | 770 bool MarkCompactCollector::SafeIsMap(HeapObject* object) { |
781 MapWord metamap = object->map_word(); | 771 MapWord metamap = object->map_word(); |
782 metamap.ClearMark(); | 772 metamap.ClearMark(); |
783 return metamap.ToMap()->instance_type() == MAP_TYPE; | 773 return metamap.ToMap()->instance_type() == MAP_TYPE; |
784 } | 774 } |
785 | 775 |
| 776 |
786 void MarkCompactCollector::ClearNonLiveTransitions() { | 777 void MarkCompactCollector::ClearNonLiveTransitions() { |
787 HeapObjectIterator map_iterator(Heap::map_space(), &CountMarkedCallback); | 778 HeapObjectIterator map_iterator(Heap::map_space(), &CountMarkedCallback); |
788 // Iterate over the map space, setting map transitions that go from | 779 // Iterate over the map space, setting map transitions that go from |
789 // a marked map to an unmarked map to null transitions. At the same time, | 780 // a marked map to an unmarked map to null transitions. At the same time, |
790 // set all the prototype fields of maps back to their original value, | 781 // set all the prototype fields of maps back to their original value, |
791 // dropping the back pointers temporarily stored in the prototype field. | 782 // dropping the back pointers temporarily stored in the prototype field. |
792 // Setting the prototype field requires following the linked list of | 783 // Setting the prototype field requires following the linked list of |
793 // back pointers, reversing them all at once. This allows us to find | 784 // back pointers, reversing them all at once. This allows us to find |
794 // those maps with map transitions that need to be nulled, and only | 785 // those maps with map transitions that need to be nulled, and only |
795 // scan the descriptor arrays of those maps, not all maps. | 786 // scan the descriptor arrays of those maps, not all maps. |
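The body implementing this walk is collapsed in this view. A rough sketch of the chain-following the comment describes, assuming the prototype field accessors are where the back pointers temporarily live (illustrative, not the collapsed code):

    // Follow the back pointers stored in prototype fields until the real
    // prototype (a non-map) is reached, then write it into every map on
    // the chain, restoring all the prototype fields in one pass.
    Object* current = map;
    while (SafeIsMap(HeapObject::cast(current))) {
      current = reinterpret_cast<Map*>(current)->prototype();   // a back pointer here
    }
    Object* real_prototype = current;
    current = map;
    while (SafeIsMap(HeapObject::cast(current))) {
      Object* next = reinterpret_cast<Map*>(current)->prototype();
      reinterpret_cast<Map*>(current)->set_prototype(real_prototype);
      current = next;
    }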
(...skipping 275 matching lines...)
1071 } | 1062 } |
1072 | 1063 |
1073 | 1064 |
1074 // We scavenge new space simultaneously with sweeping. This is done in two | 1065 // We scavenge new space simultaneously with sweeping. This is done in two |
1075 // passes. | 1066 // passes. |
1076 // The first pass migrates all alive objects from one semispace to another or | 1067 // The first pass migrates all alive objects from one semispace to another or |
1077 // promotes them to old space. The forwarding address is written directly into | 1068 // promotes them to old space. The forwarding address is written directly into |
1078 // the first word of the object without any encoding. If the object is dead we | 1069 // the first word of the object without any encoding. If the object is dead we |
1079 // write NULL as the forwarding address. | 1070 // write NULL as the forwarding address. |
1080 // The second pass updates pointers to new space in all spaces. It is possible | 1071 // The second pass updates pointers to new space in all spaces. It is possible |
1081 // to encounter pointers to dead objects during traversal of remembered set for | 1072 // to encounter pointers to dead objects during traversal of dirty regions; we |
1082 // map space because remembered set bits corresponding to dead maps are cleared | 1073 // should clear them to avoid encountering them during the next dirty regions |
1083 // later during map space sweeping. | 1074 // iteration. |
1084 static void MigrateObject(Address dst, Address src, int size) { | 1075 static void MigrateObject(Address dst, |
1085 Heap::CopyBlock(reinterpret_cast<Object**>(dst), | 1076 Address src, |
1086 reinterpret_cast<Object**>(src), | 1077 int size, |
1087 size); | 1078 bool to_old_space) { |
| 1079 if (to_old_space) { |
| 1080 Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(dst, src, size); |
| 1081 } else { |
| 1082 Heap::CopyBlock(dst, src, size); |
| 1083 } |
1088 | 1084 |
1089 Memory::Address_at(src) = dst; | 1085 Memory::Address_at(src) = dst; |
1090 } | 1086 } |
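Condensing the contract the comment above describes, around MigrateObject (a summary of code already in this file, not part of the change):

    // Pass one, per object in from space:
    //   live:  MigrateObject(target->address(), current, size, promoted);
    //          Memory::Address_at(current) now holds the forwarding address.
    //   dead:  Memory::Address_at(current) = NULL;
    // Pass two, per slot p that may still point into from space:
    Address forwarded = Memory::Address_at(HeapObject::cast(*p)->address());
    *p = (forwarded == NULL) ? NULL : HeapObject::FromAddress(forwarded);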
1091 | 1087 |
1092 | 1088 |
1093 // Visitor for updating pointers from live objects in old spaces to new space. | 1089 // Visitor for updating pointers from live objects in old spaces to new space. |
1094 // It does not expect to encounter pointers to dead objects. | 1090 // It does not expect to encounter pointers to dead objects. |
1095 class PointersToNewGenUpdatingVisitor: public ObjectVisitor { | 1091 class PointersToNewGenUpdatingVisitor: public ObjectVisitor { |
1096 public: | 1092 public: |
1097 void VisitPointer(Object** p) { | 1093 void VisitPointer(Object** p) { |
(...skipping 26 matching lines...)
1124 HeapObject* obj = HeapObject::cast(*p); | 1120 HeapObject* obj = HeapObject::cast(*p); |
1125 Address old_addr = obj->address(); | 1121 Address old_addr = obj->address(); |
1126 | 1122 |
1127 if (Heap::new_space()->Contains(obj)) { | 1123 if (Heap::new_space()->Contains(obj)) { |
1128 ASSERT(Heap::InFromSpace(*p)); | 1124 ASSERT(Heap::InFromSpace(*p)); |
1129 *p = HeapObject::FromAddress(Memory::Address_at(old_addr)); | 1125 *p = HeapObject::FromAddress(Memory::Address_at(old_addr)); |
1130 } | 1126 } |
1131 } | 1127 } |
1132 }; | 1128 }; |
1133 | 1129 |
| 1130 |
1134 // Visitor for updating pointers from live objects in old spaces to new space. | 1131 // Visitor for updating pointers from live objects in old spaces to new space. |
1135 // It can encounter pointers to dead objects in new space when traversing map | 1132 // It can encounter pointers to dead objects in new space when traversing map |
1136 // space (see comment for MigrateObject). | 1133 // space (see comment for MigrateObject). |
1137 static void UpdatePointerToNewGen(HeapObject** p) { | 1134 static void UpdatePointerToNewGen(HeapObject** p) { |
1138 if (!(*p)->IsHeapObject()) return; | 1135 if (!(*p)->IsHeapObject()) return; |
1139 | 1136 |
1140 Address old_addr = (*p)->address(); | 1137 Address old_addr = (*p)->address(); |
1141 ASSERT(Heap::InFromSpace(*p)); | 1138 ASSERT(Heap::InFromSpace(*p)); |
1142 | 1139 |
1143 Address new_addr = Memory::Address_at(old_addr); | 1140 Address new_addr = Memory::Address_at(old_addr); |
1144 | 1141 |
1145 // The object pointed to by *p is dead. Update is not required. | 1142 if (new_addr == NULL) { |
1146 if (new_addr == NULL) return; | 1143 // We encountered a pointer to a dead object. Clear it so we will |
1147 | 1144 // not visit it again during the next iteration of dirty regions. |
1148 *p = HeapObject::FromAddress(new_addr); | 1145 *p = NULL; |
| 1146 } else { |
| 1147 *p = HeapObject::FromAddress(new_addr); |
| 1148 } |
1149 } | 1149 } |
1150 | 1150 |
1151 | 1151 |
1152 static String* UpdateNewSpaceReferenceInExternalStringTableEntry(Object **p) { | 1152 static String* UpdateNewSpaceReferenceInExternalStringTableEntry(Object **p) { |
1153 Address old_addr = HeapObject::cast(*p)->address(); | 1153 Address old_addr = HeapObject::cast(*p)->address(); |
1154 Address new_addr = Memory::Address_at(old_addr); | 1154 Address new_addr = Memory::Address_at(old_addr); |
1155 return String::cast(HeapObject::FromAddress(new_addr)); | 1155 return String::cast(HeapObject::FromAddress(new_addr)); |
1156 } | 1156 } |
1157 | 1157 |
1158 | 1158 |
1159 static bool TryPromoteObject(HeapObject* object, int object_size) { | 1159 static bool TryPromoteObject(HeapObject* object, int object_size) { |
1160 Object* result; | 1160 Object* result; |
1161 | 1161 |
1162 if (object_size > Heap::MaxObjectSizeInPagedSpace()) { | 1162 if (object_size > Heap::MaxObjectSizeInPagedSpace()) { |
1163 result = Heap::lo_space()->AllocateRawFixedArray(object_size); | 1163 result = Heap::lo_space()->AllocateRawFixedArray(object_size); |
1164 if (!result->IsFailure()) { | 1164 if (!result->IsFailure()) { |
1165 HeapObject* target = HeapObject::cast(result); | 1165 HeapObject* target = HeapObject::cast(result); |
1166 MigrateObject(target->address(), object->address(), object_size); | 1166 MigrateObject(target->address(), object->address(), object_size, true); |
1167 Heap::UpdateRSet(target); | |
1168 MarkCompactCollector::tracer()-> | 1167 MarkCompactCollector::tracer()-> |
1169 increment_promoted_objects_size(object_size); | 1168 increment_promoted_objects_size(object_size); |
1170 return true; | 1169 return true; |
1171 } | 1170 } |
1172 } else { | 1171 } else { |
1173 OldSpace* target_space = Heap::TargetSpace(object); | 1172 OldSpace* target_space = Heap::TargetSpace(object); |
1174 | 1173 |
1175 ASSERT(target_space == Heap::old_pointer_space() || | 1174 ASSERT(target_space == Heap::old_pointer_space() || |
1176 target_space == Heap::old_data_space()); | 1175 target_space == Heap::old_data_space()); |
1177 result = target_space->AllocateRaw(object_size); | 1176 result = target_space->AllocateRaw(object_size); |
1178 if (!result->IsFailure()) { | 1177 if (!result->IsFailure()) { |
1179 HeapObject* target = HeapObject::cast(result); | 1178 HeapObject* target = HeapObject::cast(result); |
1180 MigrateObject(target->address(), object->address(), object_size); | 1179 MigrateObject(target->address(), |
1181 if (target_space == Heap::old_pointer_space()) { | 1180 object->address(), |
1182 Heap::UpdateRSet(target); | 1181 object_size, |
1183 } | 1182 target_space == Heap::old_pointer_space()); |
1184 MarkCompactCollector::tracer()-> | 1183 MarkCompactCollector::tracer()-> |
1185 increment_promoted_objects_size(object_size); | 1184 increment_promoted_objects_size(object_size); |
1186 return true; | 1185 return true; |
1187 } | 1186 } |
1188 } | 1187 } |
1189 | 1188 |
1190 return false; | 1189 return false; |
1191 } | 1190 } |
1192 | 1191 |
1193 | 1192 |
(...skipping 21 matching lines...)
1215 MarkCompactCollector::tracer()->decrement_marked_count(); | 1214 MarkCompactCollector::tracer()->decrement_marked_count(); |
1216 | 1215 |
1217 size = object->Size(); | 1216 size = object->Size(); |
1218 survivors_size += size; | 1217 survivors_size += size; |
1219 | 1218 |
1220 // Aggressively promote young survivors to the old space. | 1219 // Aggressively promote young survivors to the old space. |
1221 if (TryPromoteObject(object, size)) { | 1220 if (TryPromoteObject(object, size)) { |
1222 continue; | 1221 continue; |
1223 } | 1222 } |
1224 | 1223 |
1225 // Promotion either failed or not required. | 1224 // Promotion failed. Just migrate the object to the other semispace. |
1226 // Copy the content of the object. | |
1227 Object* target = space->AllocateRaw(size); | 1225 Object* target = space->AllocateRaw(size); |
1228 | 1226 |
1229 // Allocation cannot fail at this point: semispaces are of equal size. | 1227 // Allocation cannot fail at this point: semispaces are of equal size. |
1230 ASSERT(!target->IsFailure()); | 1228 ASSERT(!target->IsFailure()); |
1231 | 1229 |
1232 MigrateObject(HeapObject::cast(target)->address(), current, size); | 1230 MigrateObject(HeapObject::cast(target)->address(), |
| 1231 current, |
| 1232 size, |
| 1233 false); |
1233 } else { | 1234 } else { |
1234 size = object->Size(); | 1235 size = object->Size(); |
1235 Memory::Address_at(current) = NULL; | 1236 Memory::Address_at(current) = NULL; |
1236 } | 1237 } |
1237 } | 1238 } |
1238 | 1239 |
1239 // Second pass: find pointers to new space and update them. | 1240 // Second pass: find pointers to new space and update them. |
1240 PointersToNewGenUpdatingVisitor updating_visitor; | 1241 PointersToNewGenUpdatingVisitor updating_visitor; |
1241 | 1242 |
1242 // Update pointers in to space. | 1243 // Update pointers in to space. |
1243 HeapObject* object; | 1244 HeapObject* object; |
1244 for (Address current = space->bottom(); | 1245 for (Address current = space->bottom(); |
1245 current < space->top(); | 1246 current < space->top(); |
1246 current += object->Size()) { | 1247 current += object->Size()) { |
1247 object = HeapObject::FromAddress(current); | 1248 object = HeapObject::FromAddress(current); |
1248 | 1249 |
1249 object->IterateBody(object->map()->instance_type(), | 1250 object->IterateBody(object->map()->instance_type(), |
1250 object->Size(), | 1251 object->Size(), |
1251 &updating_visitor); | 1252 &updating_visitor); |
1252 } | 1253 } |
1253 | 1254 |
1254 // Update roots. | 1255 // Update roots. |
1255 Heap::IterateRoots(&updating_visitor, VISIT_ALL_IN_SCAVENGE); | 1256 Heap::IterateRoots(&updating_visitor, VISIT_ALL_IN_SCAVENGE); |
1256 | 1257 |
1257 // Update pointers in old spaces. | 1258 // Update pointers in old spaces. |
1258 Heap::IterateRSet(Heap::old_pointer_space(), &UpdatePointerToNewGen); | 1259 Heap::IterateDirtyRegions(Heap::old_pointer_space(), |
1259 Heap::IterateRSet(Heap::map_space(), &UpdatePointerToNewGen); | 1260 &Heap::IteratePointersInDirtyRegion, |
1260 Heap::lo_space()->IterateRSet(&UpdatePointerToNewGen); | 1261 &UpdatePointerToNewGen, |
| 1262 Heap::WATERMARK_SHOULD_BE_VALID); |
| 1263 |
| 1264 Heap::lo_space()->IterateDirtyRegions(&UpdatePointerToNewGen); |
1261 | 1265 |
1262 // Update pointers from cells. | 1266 // Update pointers from cells. |
1263 HeapObjectIterator cell_iterator(Heap::cell_space()); | 1267 HeapObjectIterator cell_iterator(Heap::cell_space()); |
1264 for (HeapObject* cell = cell_iterator.next(); | 1268 for (HeapObject* cell = cell_iterator.next(); |
1265 cell != NULL; | 1269 cell != NULL; |
1266 cell = cell_iterator.next()) { | 1270 cell = cell_iterator.next()) { |
1267 if (cell->IsJSGlobalPropertyCell()) { | 1271 if (cell->IsJSGlobalPropertyCell()) { |
1268 Address value_address = | 1272 Address value_address = |
1269 reinterpret_cast<Address>(cell) + | 1273 reinterpret_cast<Address>(cell) + |
1270 (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag); | 1274 (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag); |
(...skipping 45 matching lines...)
1316 | 1320 |
1317 for (Address current = p->ObjectAreaStart(); | 1321 for (Address current = p->ObjectAreaStart(); |
1318 current < p->AllocationTop(); | 1322 current < p->AllocationTop(); |
1319 current += object->Size()) { | 1323 current += object->Size()) { |
1320 object = HeapObject::FromAddress(current); | 1324 object = HeapObject::FromAddress(current); |
1321 if (object->IsMarked()) { | 1325 if (object->IsMarked()) { |
1322 object->ClearMark(); | 1326 object->ClearMark(); |
1323 MarkCompactCollector::tracer()->decrement_marked_count(); | 1327 MarkCompactCollector::tracer()->decrement_marked_count(); |
1324 | 1328 |
1325 if (!is_previous_alive) { // Transition from free to live. | 1329 if (!is_previous_alive) { // Transition from free to live. |
1326 dealloc(free_start, static_cast<int>(current - free_start), true); | 1330 dealloc(free_start, |
| 1331 static_cast<int>(current - free_start), |
| 1332 true, |
| 1333 false); |
1327 is_previous_alive = true; | 1334 is_previous_alive = true; |
1328 } | 1335 } |
1329 } else { | 1336 } else { |
1330 MarkCompactCollector::ReportDeleteIfNeeded(object); | 1337 MarkCompactCollector::ReportDeleteIfNeeded(object); |
1331 if (is_previous_alive) { // Transition from live to free. | 1338 if (is_previous_alive) { // Transition from live to free. |
1332 free_start = current; | 1339 free_start = current; |
1333 is_previous_alive = false; | 1340 is_previous_alive = false; |
1334 } | 1341 } |
1335 } | 1342 } |
1336 // The object is now unmarked for the call to Size() at the top of the | 1343 // The object is now unmarked for the call to Size() at the top of the |
1337 // loop. | 1344 // loop. |
1338 } | 1345 } |
1339 | 1346 |
1340 bool page_is_empty = (p->ObjectAreaStart() == p->AllocationTop()) | 1347 bool page_is_empty = (p->ObjectAreaStart() == p->AllocationTop()) |
1341 || (!is_previous_alive && free_start == p->ObjectAreaStart()); | 1348 || (!is_previous_alive && free_start == p->ObjectAreaStart()); |
1342 | 1349 |
1343 if (page_is_empty) { | 1350 if (page_is_empty) { |
1344 // This page is empty. Check whether we are in the middle of | 1351 // This page is empty. Check whether we are in the middle of |
1345 // a sequence of empty pages and start one if not. | 1352 // a sequence of empty pages and start one if not. |
1346 if (!first_empty_page->is_valid()) { | 1353 if (!first_empty_page->is_valid()) { |
1347 first_empty_page = p; | 1354 first_empty_page = p; |
1348 prec_first_empty_page = prev; | 1355 prec_first_empty_page = prev; |
1349 } | 1356 } |
1350 | 1357 |
1351 if (!is_previous_alive) { | 1358 if (!is_previous_alive) { |
1352 // There are dead objects on this page. Update space accounting stats | 1359 // There are dead objects on this page. Update space accounting stats |
1353 // without putting anything into free list. | 1360 // without putting anything into free list. |
1354 int size_in_bytes = static_cast<int>(p->AllocationTop() - free_start); | 1361 int size_in_bytes = static_cast<int>(p->AllocationTop() - free_start); |
1355 if (size_in_bytes > 0) { | 1362 if (size_in_bytes > 0) { |
1356 dealloc(free_start, size_in_bytes, false); | 1363 dealloc(free_start, size_in_bytes, false, true); |
1357 } | 1364 } |
1358 } | 1365 } |
1359 } else { | 1366 } else { |
1360 // This page is not empty. Sequence of empty pages ended on the previous | 1367 // This page is not empty. Sequence of empty pages ended on the previous |
1361 // one. | 1368 // one. |
1362 if (first_empty_page->is_valid()) { | 1369 if (first_empty_page->is_valid()) { |
1363 space->FreePages(prec_first_empty_page, prev); | 1370 space->FreePages(prec_first_empty_page, prev); |
1364 prec_first_empty_page = first_empty_page = Page::FromAddress(NULL); | 1371 prec_first_empty_page = first_empty_page = Page::FromAddress(NULL); |
1365 } | 1372 } |
1366 | 1373 |
1367 // If there is a free ending area on one of the previous pages we have | 1374 // If there is a free ending area on one of the previous pages we have |
1368 // to deallocate that area and put it on the free list. | 1375 // to deallocate that area and put it on the free list. |
1369 if (last_free_size > 0) { | 1376 if (last_free_size > 0) { |
1370 dealloc(last_free_start, last_free_size, true); | 1377 Page::FromAddress(last_free_start)-> |
| 1378 SetAllocationWatermark(last_free_start); |
| 1379 dealloc(last_free_start, last_free_size, true, true); |
1371 last_free_start = NULL; | 1380 last_free_start = NULL; |
1372 last_free_size = 0; | 1381 last_free_size = 0; |
1373 } | 1382 } |
1374 | 1383 |
1375 // If the last region of this page was not live we remember it. | 1384 // If the last region of this page was not live we remember it. |
1376 if (!is_previous_alive) { | 1385 if (!is_previous_alive) { |
1377 ASSERT(last_free_size == 0); | 1386 ASSERT(last_free_size == 0); |
1378 last_free_size = static_cast<int>(p->AllocationTop() - free_start); | 1387 last_free_size = static_cast<int>(p->AllocationTop() - free_start); |
1379 last_free_start = free_start; | 1388 last_free_start = free_start; |
1380 } | 1389 } |
(...skipping 10 matching lines...)
1391 // to the beginning of the first empty page. | 1400 // to the beginning of the first empty page. |
1392 ASSERT(prev == space->AllocationTopPage()); | 1401 ASSERT(prev == space->AllocationTopPage()); |
1393 | 1402 |
1394 new_allocation_top = first_empty_page->ObjectAreaStart(); | 1403 new_allocation_top = first_empty_page->ObjectAreaStart(); |
1395 } | 1404 } |
1396 | 1405 |
1397 if (last_free_size > 0) { | 1406 if (last_free_size > 0) { |
1398 // There was a free ending area on the previous page. | 1407 // There was a free ending area on the previous page. |
1399 // Deallocate it without putting it into freelist and move allocation | 1408 // Deallocate it without putting it into freelist and move allocation |
1400 // top to the beginning of this free area. | 1409 // top to the beginning of this free area. |
1401 dealloc(last_free_start, last_free_size, false); | 1410 dealloc(last_free_start, last_free_size, false, true); |
1402 new_allocation_top = last_free_start; | 1411 new_allocation_top = last_free_start; |
1403 } | 1412 } |
1404 | 1413 |
1405 if (new_allocation_top != NULL) { | 1414 if (new_allocation_top != NULL) { |
1406 #ifdef DEBUG | 1415 #ifdef DEBUG |
1407 Page* new_allocation_top_page = Page::FromAllocationTop(new_allocation_top); | 1416 Page* new_allocation_top_page = Page::FromAllocationTop(new_allocation_top); |
1408 if (!first_empty_page->is_valid()) { | 1417 if (!first_empty_page->is_valid()) { |
1409 ASSERT(new_allocation_top_page == space->AllocationTopPage()); | 1418 ASSERT(new_allocation_top_page == space->AllocationTopPage()); |
1410 } else if (last_free_size > 0) { | 1419 } else if (last_free_size > 0) { |
1411 ASSERT(new_allocation_top_page == prec_first_empty_page); | 1420 ASSERT(new_allocation_top_page == prec_first_empty_page); |
1412 } else { | 1421 } else { |
1413 ASSERT(new_allocation_top_page == first_empty_page); | 1422 ASSERT(new_allocation_top_page == first_empty_page); |
1414 } | 1423 } |
1415 #endif | 1424 #endif |
1416 | 1425 |
1417 space->SetTop(new_allocation_top); | 1426 space->SetTop(new_allocation_top); |
1418 } | 1427 } |
1419 } | 1428 } |
1420 | 1429 |
1421 | 1430 |
1422 void MarkCompactCollector::DeallocateOldPointerBlock(Address start, | 1431 void MarkCompactCollector::DeallocateOldPointerBlock(Address start, |
1423 int size_in_bytes, | 1432 int size_in_bytes, |
1424 bool add_to_freelist) { | 1433 bool add_to_freelist, |
1425 Heap::ClearRSetRange(start, size_in_bytes); | 1434 bool last_on_page) { |
1426 Heap::old_pointer_space()->Free(start, size_in_bytes, add_to_freelist); | 1435 Heap::old_pointer_space()->Free(start, size_in_bytes, add_to_freelist); |
1427 } | 1436 } |
1428 | 1437 |
1429 | 1438 |
1430 void MarkCompactCollector::DeallocateOldDataBlock(Address start, | 1439 void MarkCompactCollector::DeallocateOldDataBlock(Address start, |
1431 int size_in_bytes, | 1440 int size_in_bytes, |
1432 bool add_to_freelist) { | 1441 bool add_to_freelist, |
| 1442 bool last_on_page) { |
1433 Heap::old_data_space()->Free(start, size_in_bytes, add_to_freelist); | 1443 Heap::old_data_space()->Free(start, size_in_bytes, add_to_freelist); |
1434 } | 1444 } |
1435 | 1445 |
1436 | 1446 |
1437 void MarkCompactCollector::DeallocateCodeBlock(Address start, | 1447 void MarkCompactCollector::DeallocateCodeBlock(Address start, |
1438 int size_in_bytes, | 1448 int size_in_bytes, |
1439 bool add_to_freelist) { | 1449 bool add_to_freelist, |
| 1450 bool last_on_page) { |
1440 Heap::code_space()->Free(start, size_in_bytes, add_to_freelist); | 1451 Heap::code_space()->Free(start, size_in_bytes, add_to_freelist); |
1441 } | 1452 } |
1442 | 1453 |
1443 | 1454 |
1444 void MarkCompactCollector::DeallocateMapBlock(Address start, | 1455 void MarkCompactCollector::DeallocateMapBlock(Address start, |
1445 int size_in_bytes, | 1456 int size_in_bytes, |
1446 bool add_to_freelist) { | 1457 bool add_to_freelist, |
| 1458 bool last_on_page) { |
1447 // Objects in map space are assumed to have size Map::kSize and a | 1459 // Objects in map space are assumed to have size Map::kSize and a |
1448 // valid map in their first word. Thus, we break the free block up into | 1460 // valid map in their first word. Thus, we break the free block up into |
1449 // chunks and free them separately. | 1461 // chunks and free them separately. |
1450 ASSERT(size_in_bytes % Map::kSize == 0); | 1462 ASSERT(size_in_bytes % Map::kSize == 0); |
1451 Heap::ClearRSetRange(start, size_in_bytes); | |
1452 Address end = start + size_in_bytes; | 1463 Address end = start + size_in_bytes; |
1453 for (Address a = start; a < end; a += Map::kSize) { | 1464 for (Address a = start; a < end; a += Map::kSize) { |
1454 Heap::map_space()->Free(a, add_to_freelist); | 1465 Heap::map_space()->Free(a, add_to_freelist); |
1455 } | 1466 } |
1456 } | 1467 } |
1457 | 1468 |
1458 | 1469 |
1459 void MarkCompactCollector::DeallocateCellBlock(Address start, | 1470 void MarkCompactCollector::DeallocateCellBlock(Address start, |
1460 int size_in_bytes, | 1471 int size_in_bytes, |
1461 bool add_to_freelist) { | 1472 bool add_to_freelist, |
| 1473 bool last_on_page) { |
1462 // Free-list elements in cell space are assumed to have a fixed size. | 1474 // Free-list elements in cell space are assumed to have a fixed size. |
1463 // We break the free block into chunks and add them to the free list | 1475 // We break the free block into chunks and add them to the free list |
1464 // individually. | 1476 // individually. |
1465 int size = Heap::cell_space()->object_size_in_bytes(); | 1477 int size = Heap::cell_space()->object_size_in_bytes(); |
1466 ASSERT(size_in_bytes % size == 0); | 1478 ASSERT(size_in_bytes % size == 0); |
1467 Heap::ClearRSetRange(start, size_in_bytes); | |
1468 Address end = start + size_in_bytes; | 1479 Address end = start + size_in_bytes; |
1469 for (Address a = start; a < end; a += size) { | 1480 for (Address a = start; a < end; a += size) { |
1470 Heap::cell_space()->Free(a, add_to_freelist); | 1481 Heap::cell_space()->Free(a, add_to_freelist); |
1471 } | 1482 } |
1472 } | 1483 } |
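All five Deallocate*Block callbacks above now share a widened four-argument shape; SweepSpace's dealloc parameter presumably matches a function-pointer type along these lines (the typedef name is assumed, not taken from this diff):

    typedef void (*DeallocateFunction)(Address start,
                                       int size_in_bytes,
                                       bool add_to_freelist,
                                       bool last_on_page);

Judging by the call sites in SweepSpace, last_on_page is true exactly for free blocks that run up to the page's allocation top, which lets a callback apply page-granularity bookkeeping in one place.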
1473 | 1484 |
1474 | 1485 |
1475 void MarkCompactCollector::EncodeForwardingAddresses() { | 1486 void MarkCompactCollector::EncodeForwardingAddresses() { |
1476 ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES); | 1487 ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES); |
1477 // Objects in the active semispace of the young generation may be | 1488 // Objects in the active semispace of the young generation may be |
(...skipping 78 matching lines...)
1556 #ifdef DEBUG | 1567 #ifdef DEBUG |
1557 CheckNoMapsToEvacuate(); | 1568 CheckNoMapsToEvacuate(); |
1558 #endif | 1569 #endif |
1559 } | 1570 } |
1560 | 1571 |
1561 void UpdateMapPointersInRoots() { | 1572 void UpdateMapPointersInRoots() { |
1562 Heap::IterateRoots(&map_updating_visitor_, VISIT_ONLY_STRONG); | 1573 Heap::IterateRoots(&map_updating_visitor_, VISIT_ONLY_STRONG); |
1563 GlobalHandles::IterateWeakRoots(&map_updating_visitor_); | 1574 GlobalHandles::IterateWeakRoots(&map_updating_visitor_); |
1564 } | 1575 } |
1565 | 1576 |
1566 void FinishMapSpace() { | |
1567 // Iterate through to space and finish move. | |
1568 MapIterator it; | |
1569 HeapObject* o = it.next(); | |
1570 for (; o != first_map_to_evacuate_; o = it.next()) { | |
1571 ASSERT(o != NULL); | |
1572 Map* map = reinterpret_cast<Map*>(o); | |
1573 ASSERT(!map->IsMarked()); | |
1574 ASSERT(!map->IsOverflowed()); | |
1575 ASSERT(map->IsMap()); | |
1576 Heap::UpdateRSet(map); | |
1577 } | |
1578 } | |
1579 | |
1580 void UpdateMapPointersInPagedSpace(PagedSpace* space) { | 1577 void UpdateMapPointersInPagedSpace(PagedSpace* space) { |
1581 ASSERT(space != Heap::map_space()); | 1578 ASSERT(space != Heap::map_space()); |
1582 | 1579 |
1583 PageIterator it(space, PageIterator::PAGES_IN_USE); | 1580 PageIterator it(space, PageIterator::PAGES_IN_USE); |
1584 while (it.has_next()) { | 1581 while (it.has_next()) { |
1585 Page* p = it.next(); | 1582 Page* p = it.next(); |
1586 UpdateMapPointersInRange(p->ObjectAreaStart(), p->AllocationTop()); | 1583 UpdateMapPointersInRange(p->ObjectAreaStart(), p->AllocationTop()); |
1587 } | 1584 } |
1588 } | 1585 } |
1589 | 1586 |
(...skipping 72 matching lines...)
1662 ASSERT(map->IsMap()); | 1659 ASSERT(map->IsMap()); |
1663 return map; | 1660 return map; |
1664 } | 1661 } |
1665 | 1662 |
1666 static void EvacuateMap(Map* vacant_map, Map* map_to_evacuate) { | 1663 static void EvacuateMap(Map* vacant_map, Map* map_to_evacuate) { |
1667 ASSERT(FreeListNode::IsFreeListNode(vacant_map)); | 1664 ASSERT(FreeListNode::IsFreeListNode(vacant_map)); |
1668 ASSERT(map_to_evacuate->IsMap()); | 1665 ASSERT(map_to_evacuate->IsMap()); |
1669 | 1666 |
1670 ASSERT(Map::kSize % 4 == 0); | 1667 ASSERT(Map::kSize % 4 == 0); |
1671 | 1668 |
1672 Heap::CopyBlock(reinterpret_cast<Object**>(vacant_map->address()), | 1669 Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(vacant_map->address(), |
1673 reinterpret_cast<Object**>(map_to_evacuate->address()), | 1670 map_to_evacuate->address(), |
1674 Map::kSize); | 1671 Map::kSize); |
1675 | 1672 |
1676 ASSERT(vacant_map->IsMap()); // Due to memcpy above. | 1673 ASSERT(vacant_map->IsMap()); // Due to memcpy above. |
1677 | 1674 |
1678 MapWord forwarding_map_word = MapWord::FromMap(vacant_map); | 1675 MapWord forwarding_map_word = MapWord::FromMap(vacant_map); |
1679 forwarding_map_word.SetOverflow(); | 1676 forwarding_map_word.SetOverflow(); |
1680 map_to_evacuate->set_map_word(forwarding_map_word); | 1677 map_to_evacuate->set_map_word(forwarding_map_word); |
1681 | 1678 |
1682 ASSERT(map_to_evacuate->map_word().IsOverflowed()); | 1679 ASSERT(map_to_evacuate->map_word().IsOverflowed()); |
1683 ASSERT(GetForwardedMap(map_to_evacuate->map_word()) == vacant_map); | 1680 ASSERT(GetForwardedMap(map_to_evacuate->map_word()) == vacant_map); |
1684 } | 1681 } |
(...skipping 64 matching lines...)
1749 // bits and free the nonlive blocks (for old and map spaces). We sweep | 1746 // bits and free the nonlive blocks (for old and map spaces). We sweep |
1750 // the map space last because freeing non-live maps overwrites them and | 1747 // the map space last because freeing non-live maps overwrites them and |
1751 // the other spaces rely on possibly non-live maps to get the sizes for | 1748 // the other spaces rely on possibly non-live maps to get the sizes for |
1752 // non-live objects. | 1749 // non-live objects. |
1753 SweepSpace(Heap::old_pointer_space(), &DeallocateOldPointerBlock); | 1750 SweepSpace(Heap::old_pointer_space(), &DeallocateOldPointerBlock); |
1754 SweepSpace(Heap::old_data_space(), &DeallocateOldDataBlock); | 1751 SweepSpace(Heap::old_data_space(), &DeallocateOldDataBlock); |
1755 SweepSpace(Heap::code_space(), &DeallocateCodeBlock); | 1752 SweepSpace(Heap::code_space(), &DeallocateCodeBlock); |
1756 SweepSpace(Heap::cell_space(), &DeallocateCellBlock); | 1753 SweepSpace(Heap::cell_space(), &DeallocateCellBlock); |
1757 SweepNewSpace(Heap::new_space()); | 1754 SweepNewSpace(Heap::new_space()); |
1758 SweepSpace(Heap::map_space(), &DeallocateMapBlock); | 1755 SweepSpace(Heap::map_space(), &DeallocateMapBlock); |
| 1756 |
| 1757 Heap::IterateDirtyRegions(Heap::map_space(), |
| 1758 &Heap::IteratePointersInDirtyMapsRegion, |
| 1759 &UpdatePointerToNewGen, |
| 1760 Heap::WATERMARK_SHOULD_BE_VALID); |
| 1761 |
1759 int live_maps_size = Heap::map_space()->Size(); | 1762 int live_maps_size = Heap::map_space()->Size(); |
1760 int live_maps = live_maps_size / Map::kSize; | 1763 int live_maps = live_maps_size / Map::kSize; |
1761 ASSERT(live_map_objects_size_ == live_maps_size); | 1764 ASSERT(live_map_objects_size_ == live_maps_size); |
1762 | 1765 |
1763 if (Heap::map_space()->NeedsCompaction(live_maps)) { | 1766 if (Heap::map_space()->NeedsCompaction(live_maps)) { |
1764 MapCompact map_compact(live_maps); | 1767 MapCompact map_compact(live_maps); |
1765 | 1768 |
1766 map_compact.CompactMaps(); | 1769 map_compact.CompactMaps(); |
1767 map_compact.UpdateMapPointersInRoots(); | 1770 map_compact.UpdateMapPointersInRoots(); |
1768 | 1771 |
1769 map_compact.FinishMapSpace(); | |
1770 PagedSpaces spaces; | 1772 PagedSpaces spaces; |
1771 for (PagedSpace* space = spaces.next(); | 1773 for (PagedSpace* space = spaces.next(); |
1772 space != NULL; space = spaces.next()) { | 1774 space != NULL; space = spaces.next()) { |
1773 if (space == Heap::map_space()) continue; | 1775 if (space == Heap::map_space()) continue; |
1774 map_compact.UpdateMapPointersInPagedSpace(space); | 1776 map_compact.UpdateMapPointersInPagedSpace(space); |
1775 } | 1777 } |
1776 map_compact.UpdateMapPointersInNewSpace(); | 1778 map_compact.UpdateMapPointersInNewSpace(); |
1777 map_compact.UpdateMapPointersInLargeObjectSpace(); | 1779 map_compact.UpdateMapPointersInLargeObjectSpace(); |
1778 | 1780 |
1779 map_compact.Finish(); | 1781 map_compact.Finish(); |
(...skipping 252 matching lines...)
2032 Address obj_addr = obj->address(); | 2034 Address obj_addr = obj->address(); |
2033 | 2035 |
2034 // Find the first live object's forwarding address. | 2036 // Find the first live object's forwarding address. |
2035 Page* p = Page::FromAddress(obj_addr); | 2037 Page* p = Page::FromAddress(obj_addr); |
2036 Address first_forwarded = p->mc_first_forwarded; | 2038 Address first_forwarded = p->mc_first_forwarded; |
2037 | 2039 |
2038 // Page start address of forwarded address. | 2040 // Page start address of forwarded address. |
2039 Page* forwarded_page = Page::FromAddress(first_forwarded); | 2041 Page* forwarded_page = Page::FromAddress(first_forwarded); |
2040 int forwarded_offset = forwarded_page->Offset(first_forwarded); | 2042 int forwarded_offset = forwarded_page->Offset(first_forwarded); |
2041 | 2043 |
2042 // Find end of allocation of in the page of first_forwarded. | 2044 // Find end of allocation in the page of first_forwarded. |
2043 Address mc_top = forwarded_page->mc_relocation_top; | 2045 int mc_top_offset = forwarded_page->AllocationWatermarkOffset(); |
2044 int mc_top_offset = forwarded_page->Offset(mc_top); | |
2045 | 2046 |
2046 // Check if current object's forward pointer is in the same page | 2047 // Check if current object's forward pointer is in the same page |
2047 // as the first live object's forwarding pointer | 2048 // as the first live object's forwarding pointer |
2048 if (forwarded_offset + offset < mc_top_offset) { | 2049 if (forwarded_offset + offset < mc_top_offset) { |
2049 // In the same page. | 2050 // In the same page. |
2050 return first_forwarded + offset; | 2051 return first_forwarded + offset; |
2051 } | 2052 } |
2052 | 2053 |
2053 // Must be in the next page, NOTE: this may cross chunks. | 2054 // Must be in the next page, NOTE: this may cross chunks. |
2054 Page* next_page = forwarded_page->next_page(); | 2055 Page* next_page = forwarded_page->next_page(); |
2055 ASSERT(next_page->is_valid()); | 2056 ASSERT(next_page->is_valid()); |
2056 | 2057 |
2057 offset -= (mc_top_offset - forwarded_offset); | 2058 offset -= (mc_top_offset - forwarded_offset); |
2058 offset += Page::kObjectStartOffset; | 2059 offset += Page::kObjectStartOffset; |
2059 | 2060 |
2060 ASSERT_PAGE_OFFSET(offset); | 2061 ASSERT_PAGE_OFFSET(offset); |
2061 ASSERT(next_page->OffsetToAddress(offset) < next_page->mc_relocation_top); | 2062 ASSERT(next_page->OffsetToAddress(offset) < next_page->AllocationTop()); |
2062 | 2063 |
2063 return next_page->OffsetToAddress(offset); | 2064 return next_page->OffsetToAddress(offset); |
2064 } | 2065 } |
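A worked example of the page-crossing arithmetic above, with invented numbers:

    // Suppose first_forwarded is at offset 0x800 in forwarded_page, the
    // allocation watermark is at offset 0xA00, and offset == 0x300. Then
    //   forwarded_offset + offset == 0xB00 >= 0xA00 == mc_top_offset,
    // so the forwarded object spilled onto the next page:
    //   offset -= (0xA00 - 0x800);          // 0x100 left over
    //   offset += Page::kObjectStartOffset; // skip the next page's header
    //   return next_page->OffsetToAddress(offset);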
2065 | 2066 |
2066 | 2067 |
2067 // ------------------------------------------------------------------------- | 2068 // ------------------------------------------------------------------------- |
2068 // Phase 4: Relocate objects | 2069 // Phase 4: Relocate objects |
2069 | 2070 |
2070 void MarkCompactCollector::RelocateObjects() { | 2071 void MarkCompactCollector::RelocateObjects() { |
2071 #ifdef DEBUG | 2072 #ifdef DEBUG |
(...skipping 24 matching lines...)
2096 ASSERT(live_maps_size == live_map_objects_size_); | 2097 ASSERT(live_maps_size == live_map_objects_size_); |
2097 ASSERT(live_data_olds_size == live_old_data_objects_size_); | 2098 ASSERT(live_data_olds_size == live_old_data_objects_size_); |
2098 ASSERT(live_pointer_olds_size == live_old_pointer_objects_size_); | 2099 ASSERT(live_pointer_olds_size == live_old_pointer_objects_size_); |
2099 ASSERT(live_codes_size == live_code_objects_size_); | 2100 ASSERT(live_codes_size == live_code_objects_size_); |
2100 ASSERT(live_cells_size == live_cell_objects_size_); | 2101 ASSERT(live_cells_size == live_cell_objects_size_); |
2101 ASSERT(live_news_size == live_young_objects_size_); | 2102 ASSERT(live_news_size == live_young_objects_size_); |
2102 | 2103 |
2103 // Flip from and to spaces | 2104 // Flip from and to spaces |
2104 Heap::new_space()->Flip(); | 2105 Heap::new_space()->Flip(); |
2105 | 2106 |
| 2107 Heap::new_space()->MCCommitRelocationInfo(); |
| 2108 |
2106 // Set age_mark to bottom in to space | 2109 // Set age_mark to bottom in to space |
2107 Address mark = Heap::new_space()->bottom(); | 2110 Address mark = Heap::new_space()->bottom(); |
2108 Heap::new_space()->set_age_mark(mark); | 2111 Heap::new_space()->set_age_mark(mark); |
2109 | 2112 |
2110 Heap::new_space()->MCCommitRelocationInfo(); | |
2111 #ifdef DEBUG | |
2112 // It is safe to write to the remembered sets as remembered sets on a | |
2113 // page-by-page basis after committing the m-c forwarding pointer. | |
2114 Page::set_rset_state(Page::IN_USE); | |
2115 #endif | |
2116 PagedSpaces spaces; | 2113 PagedSpaces spaces; |
2117 for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next()) | 2114 for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next()) |
2118 space->MCCommitRelocationInfo(); | 2115 space->MCCommitRelocationInfo(); |
2119 | 2116 |
2120 Heap::CheckNewSpaceExpansionCriteria(); | 2117 Heap::CheckNewSpaceExpansionCriteria(); |
2121 Heap::IncrementYoungSurvivorsCounter(live_news_size); | 2118 Heap::IncrementYoungSurvivorsCounter(live_news_size); |
2122 } | 2119 } |
2123 | 2120 |
2124 | 2121 |
2125 int MarkCompactCollector::RelocateMapObject(HeapObject* obj) { | 2122 int MarkCompactCollector::RelocateMapObject(HeapObject* obj) { |
2126 // Recover map pointer. | 2123 // Recover map pointer. |
2127 MapWord encoding = obj->map_word(); | 2124 MapWord encoding = obj->map_word(); |
2128 Address map_addr = encoding.DecodeMapAddress(Heap::map_space()); | 2125 Address map_addr = encoding.DecodeMapAddress(Heap::map_space()); |
2129 ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr))); | 2126 ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr))); |
2130 | 2127 |
2131 // Get forwarding address before resetting map pointer | 2128 // Get forwarding address before resetting map pointer |
2132 Address new_addr = GetForwardingAddressInOldSpace(obj); | 2129 Address new_addr = GetForwardingAddressInOldSpace(obj); |
2133 | 2130 |
2134 // Reset map pointer. The meta map object may not be copied yet so | 2131 // Reset map pointer. The meta map object may not be copied yet so |
2135 // Map::cast does not yet work. | 2132 // Map::cast does not yet work. |
2136 obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr))); | 2133 obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr))); |
2137 | 2134 |
2138 Address old_addr = obj->address(); | 2135 Address old_addr = obj->address(); |
2139 | 2136 |
2140 if (new_addr != old_addr) { | 2137 if (new_addr != old_addr) { |
2141 // Move contents. | 2138 // Move contents. |
2142 Heap::MoveBlock(reinterpret_cast<Object**>(new_addr), | 2139 Heap::MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr, |
2143 reinterpret_cast<Object**>(old_addr), | 2140 old_addr, |
2144 Map::kSize); | 2141 Map::kSize); |
2145 } | 2142 } |
2146 | 2143 |
2147 #ifdef DEBUG | 2144 #ifdef DEBUG |
2148 if (FLAG_gc_verbose) { | 2145 if (FLAG_gc_verbose) { |
2149 PrintF("relocate %p -> %p\n", old_addr, new_addr); | 2146 PrintF("relocate %p -> %p\n", old_addr, new_addr); |
2150 } | 2147 } |
2151 #endif | 2148 #endif |
2152 | 2149 |
2153 return Map::kSize; | 2150 return Map::kSize; |
2154 } | 2151 } |
(...skipping 36 matching lines...)
2191 // Get forwarding address before resetting map pointer. | 2188 // Get forwarding address before resetting map pointer. |
2192 Address new_addr = GetForwardingAddressInOldSpace(obj); | 2189 Address new_addr = GetForwardingAddressInOldSpace(obj); |
2193 | 2190 |
2194 // Reset the map pointer. | 2191 // Reset the map pointer. |
2195 int obj_size = RestoreMap(obj, space, new_addr, map_addr); | 2192 int obj_size = RestoreMap(obj, space, new_addr, map_addr); |
2196 | 2193 |
2197 Address old_addr = obj->address(); | 2194 Address old_addr = obj->address(); |
2198 | 2195 |
2199 if (new_addr != old_addr) { | 2196 if (new_addr != old_addr) { |
2200 // Move contents. | 2197 // Move contents. |
2201 Heap::MoveBlock(reinterpret_cast<Object**>(new_addr), | 2198 if (space == Heap::old_data_space()) { |
2202 reinterpret_cast<Object**>(old_addr), | 2199 Heap::MoveBlock(new_addr, old_addr, obj_size); |
2203 obj_size); | 2200 } else { |
| 2201 Heap::MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr, |
| 2202 old_addr, |
| 2203 obj_size); |
| 2204 } |
2204 } | 2205 } |
2205 | 2206 |
2206 ASSERT(!HeapObject::FromAddress(new_addr)->IsCode()); | 2207 ASSERT(!HeapObject::FromAddress(new_addr)->IsCode()); |
2207 | 2208 |
2208 HeapObject* copied_to = HeapObject::FromAddress(new_addr); | 2209 HeapObject* copied_to = HeapObject::FromAddress(new_addr); |
2209 if (copied_to->IsJSFunction()) { | 2210 if (copied_to->IsJSFunction()) { |
2210 PROFILE(FunctionMoveEvent(old_addr, new_addr)); | 2211 PROFILE(FunctionMoveEvent(old_addr, new_addr)); |
2211 } | 2212 } |
2212 | 2213 |
2213 return obj_size; | 2214 return obj_size; |
(...skipping 24 matching lines...)
2238 // Get forwarding address before resetting map pointer | 2239 // Get forwarding address before resetting map pointer |
2239 Address new_addr = GetForwardingAddressInOldSpace(obj); | 2240 Address new_addr = GetForwardingAddressInOldSpace(obj); |
2240 | 2241 |
2241 // Reset the map pointer. | 2242 // Reset the map pointer. |
2242 int obj_size = RestoreMap(obj, Heap::code_space(), new_addr, map_addr); | 2243 int obj_size = RestoreMap(obj, Heap::code_space(), new_addr, map_addr); |
2243 | 2244 |
2244 Address old_addr = obj->address(); | 2245 Address old_addr = obj->address(); |
2245 | 2246 |
2246 if (new_addr != old_addr) { | 2247 if (new_addr != old_addr) { |
2247 // Move contents. | 2248 // Move contents. |
2248 Heap::MoveBlock(reinterpret_cast<Object**>(new_addr), | 2249 Heap::MoveBlock(new_addr, old_addr, obj_size); |
2249 reinterpret_cast<Object**>(old_addr), | |
2250 obj_size); | |
2251 } | 2250 } |
2252 | 2251 |
2253 HeapObject* copied_to = HeapObject::FromAddress(new_addr); | 2252 HeapObject* copied_to = HeapObject::FromAddress(new_addr); |
2254 if (copied_to->IsCode()) { | 2253 if (copied_to->IsCode()) { |
2255 // May also update inline cache target. | 2254 // May also update inline cache target. |
2256 Code::cast(copied_to)->Relocate(new_addr - old_addr); | 2255 Code::cast(copied_to)->Relocate(new_addr - old_addr); |
2257 // Notify the logger that compiled code has moved. | 2256 // Notify the logger that compiled code has moved. |
2258 PROFILE(CodeMoveEvent(old_addr, new_addr)); | 2257 PROFILE(CodeMoveEvent(old_addr, new_addr)); |
2259 } | 2258 } |
2260 | 2259 |
(...skipping 15 matching lines...)
2276 if (Heap::new_space()->FromSpaceContains(new_addr)) { | 2275 if (Heap::new_space()->FromSpaceContains(new_addr)) { |
2277 ASSERT(Heap::new_space()->FromSpaceOffsetForAddress(new_addr) <= | 2276 ASSERT(Heap::new_space()->FromSpaceOffsetForAddress(new_addr) <= |
2278 Heap::new_space()->ToSpaceOffsetForAddress(old_addr)); | 2277 Heap::new_space()->ToSpaceOffsetForAddress(old_addr)); |
2279 } else { | 2278 } else { |
2280 ASSERT(Heap::TargetSpace(obj) == Heap::old_pointer_space() || | 2279 ASSERT(Heap::TargetSpace(obj) == Heap::old_pointer_space() || |
2281 Heap::TargetSpace(obj) == Heap::old_data_space()); | 2280 Heap::TargetSpace(obj) == Heap::old_data_space()); |
2282 } | 2281 } |
2283 #endif | 2282 #endif |
2284 | 2283 |
2285 // New and old addresses cannot overlap. | 2284 // New and old addresses cannot overlap. |
2286 Heap::CopyBlock(reinterpret_cast<Object**>(new_addr), | 2285 if (Heap::InNewSpace(HeapObject::FromAddress(new_addr))) { |
2287 reinterpret_cast<Object**>(old_addr), | 2286 Heap::CopyBlock(new_addr, old_addr, obj_size); |
2288 obj_size); | 2287 } else { |
| 2288 Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(new_addr, |
| 2289 old_addr, |
| 2290 obj_size); |
| 2291 } |
2289 | 2292 |
2290 #ifdef DEBUG | 2293 #ifdef DEBUG |
2291 if (FLAG_gc_verbose) { | 2294 if (FLAG_gc_verbose) { |
2292 PrintF("relocate %p -> %p\n", old_addr, new_addr); | 2295 PrintF("relocate %p -> %p\n", old_addr, new_addr); |
2293 } | 2296 } |
2294 #endif | 2297 #endif |
2295 | 2298 |
2296 HeapObject* copied_to = HeapObject::FromAddress(new_addr); | 2299 HeapObject* copied_to = HeapObject::FromAddress(new_addr); |
2297 if (copied_to->IsJSFunction()) { | 2300 if (copied_to->IsJSFunction()) { |
2298 PROFILE(FunctionMoveEvent(old_addr, new_addr)); | 2301 PROFILE(FunctionMoveEvent(old_addr, new_addr)); |
2299 } | 2302 } |
2300 | 2303 |
2301 return obj_size; | 2304 return obj_size; |
2302 } | 2305 } |
2303 | 2306 |
2304 | 2307 |
2305 // ------------------------------------------------------------------------- | |
2306 // Phase 5: rebuild remembered sets | |
2307 | |
2308 void MarkCompactCollector::RebuildRSets() { | |
2309 #ifdef DEBUG | |
2310 ASSERT(state_ == RELOCATE_OBJECTS); | |
2311 state_ = REBUILD_RSETS; | |
2312 #endif | |
2313 Heap::RebuildRSets(); | |
2314 } | |
2315 | |
2316 | |
2317 void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj) { | 2308 void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj) { |
2318 #ifdef ENABLE_LOGGING_AND_PROFILING | 2309 #ifdef ENABLE_LOGGING_AND_PROFILING |
2319 if (obj->IsCode()) { | 2310 if (obj->IsCode()) { |
2320 PROFILE(CodeDeleteEvent(obj->address())); | 2311 PROFILE(CodeDeleteEvent(obj->address())); |
2321 } else if (obj->IsJSFunction()) { | 2312 } else if (obj->IsJSFunction()) { |
2322 PROFILE(FunctionDeleteEvent(obj->address())); | 2313 PROFILE(FunctionDeleteEvent(obj->address())); |
2323 } | 2314 } |
2324 #endif | 2315 #endif |
2325 } | 2316 } |
2326 | 2317 |
2327 } } // namespace v8::internal | 2318 } } // namespace v8::internal |