Chromium Code Reviews

Unified Diff: src/mark-compact.cc

Issue 2274001: Revert r4715. (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: Created 10 years, 6 months ago
 // Copyright 2006-2008 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 66 matching lines...)
 
   SweepLargeObjectSpace();
 
   if (IsCompacting()) {
     GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_COMPACT);
     EncodeForwardingAddresses();
 
     UpdatePointers();
 
     RelocateObjects();
+
+    RebuildRSets();
+
   } else {
     SweepSpaces();
   }
 
   Finish();
 
   // Save the count of marked objects remaining after the collection and
   // null out the GC tracer.
   previous_marked_count_ = tracer_->marked_count();
   ASSERT(previous_marked_count_ == 0);
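Note: r4715 had replaced V8's per-page remembered sets with dirty-region marks; this revert restores the remembered-set scheme, including RebuildRSets() as an extra phase after relocation. For orientation, a minimal standalone sketch of the idea behind a remembered set: one bit per pointer-sized slot of a page, set when the slot may hold a pointer into new space. The page size and bitmap layout here are assumptions for the sketch, not V8's actual constants.

    #include <cstdint>

    // Assumed page size for the sketch; V8's real constant may differ.
    const uintptr_t kPageSize = 8192;
    const uintptr_t kPageAlignmentMask = kPageSize - 1;

    // Map a slot address to (byte, bit) in its page's rset bitmap: one bit
    // per pointer-sized slot. A set bit means "this slot may point into new
    // space and must be revisited at the next scavenge".
    void RSetBitPosition(uintptr_t slot_addr,
                         uintptr_t* bitmap_byte, int* bit_in_byte) {
      uintptr_t page_start = slot_addr & ~kPageAlignmentMask;
      uintptr_t slot_index = (slot_addr - page_start) / sizeof(void*);
      *bitmap_byte = slot_index >> 3;   // 8 slots per bitmap byte
      *bit_in_byte = static_cast<int>(slot_index & 7);
    }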
(...skipping 14 matching lines...)
 
   compacting_collection_ =
       FLAG_always_compact || force_compaction_ || compact_on_next_gc_;
   compact_on_next_gc_ = false;
 
   if (FLAG_never_compact) compacting_collection_ = false;
   if (!Heap::map_space()->MapPointersEncodable())
     compacting_collection_ = false;
   if (FLAG_collect_maps) CreateBackPointers();
 
+#ifdef DEBUG
+  if (compacting_collection_) {
+    // We will write bookkeeping information to the remembered set area
+    // starting now.
+    Page::set_rset_state(Page::NOT_IN_USE);
+  }
+#endif
+
   PagedSpaces spaces;
   for (PagedSpace* space = spaces.next();
        space != NULL; space = spaces.next()) {
     space->PrepareForMarkCompact(compacting_collection_);
   }
 
 #ifdef DEBUG
   live_bytes_ = 0;
   live_young_objects_size_ = 0;
   live_old_pointer_objects_size_ = 0;
   live_old_data_objects_size_ = 0;
   live_code_objects_size_ = 0;
   live_map_objects_size_ = 0;
   live_cell_objects_size_ = 0;
 #endif
 }
 
 
 void MarkCompactCollector::Finish() {
 #ifdef DEBUG
-  ASSERT(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
+  ASSERT(state_ == SWEEP_SPACES || state_ == REBUILD_RSETS);
   state_ = IDLE;
 #endif
   // The stub cache is not traversed during GC; clear the cache to
   // force lazy re-initialization of it. This must be done after the
   // GC, because it relies on the new address of certain old space
   // objects (empty string, illegal builtin).
   StubCache::Clear();
 
   ExternalStringTable::CleanUp();
 
(...skipping 73 matching lines...)
   map_word.ClearMark();
   InstanceType type = map_word.ToMap()->instance_type();
   if ((type & kShortcutTypeMask) != kShortcutTypeTag) return object;
 
   Object* second = reinterpret_cast<ConsString*>(object)->unchecked_second();
   if (second != Heap::raw_unchecked_empty_string()) {
     return object;
   }
 
   // Since we don't have the object's start, it is impossible to update the
-  // page dirty marks. Therefore, we only replace the string with its left
-  // substring when page dirty marks do not change.
+  // remembered set. Therefore, we only replace the string with its left
+  // substring when the remembered set does not change.
   Object* first = reinterpret_cast<ConsString*>(object)->unchecked_first();
   if (!Heap::InNewSpace(object) && Heap::InNewSpace(first)) return object;
 
   *p = first;
   return HeapObject::cast(first);
 }
 
 
 // Helper class for marking pointers in HeapObjects.
 class MarkingVisitor : public ObjectVisitor {
(...skipping 510 matching lines...)
 void MarkCompactCollector::SweepLargeObjectSpace() {
 #ifdef DEBUG
   ASSERT(state_ == MARK_LIVE_OBJECTS);
   state_ =
       compacting_collection_ ? ENCODE_FORWARDING_ADDRESSES : SWEEP_SPACES;
 #endif
   // Deallocate unmarked objects and clear marked bits for marked objects.
   Heap::lo_space()->FreeUnmarkedObjects();
 }
 
-
 // Safe to use during marking phase only.
 bool MarkCompactCollector::SafeIsMap(HeapObject* object) {
   MapWord metamap = object->map_word();
   metamap.ClearMark();
   return metamap.ToMap()->instance_type() == MAP_TYPE;
 }
 
-
 void MarkCompactCollector::ClearNonLiveTransitions() {
   HeapObjectIterator map_iterator(Heap::map_space(), &CountMarkedCallback);
   // Iterate over the map space, setting map transitions that go from
   // a marked map to an unmarked map to null transitions. At the same time,
   // set all the prototype fields of maps back to their original value,
   // dropping the back pointers temporarily stored in the prototype field.
   // Setting the prototype field requires following the linked list of
   // back pointers, reversing them all at once. This allows us to find
   // those maps with map transitions that need to be nulled, and only
   // scan the descriptor arrays of those maps, not all maps.
(...skipping 275 matching lines...)
 }
 
 
 // We scavange new space simultaneously with sweeping. This is done in two
 // passes.
 // The first pass migrates all alive objects from one semispace to another or
 // promotes them to old space. Forwading address is written directly into
 // first word of object without any encoding. If object is dead we are writing
 // NULL as a forwarding address.
 // The second pass updates pointers to new space in all spaces. It is possible
-// to encounter pointers to dead objects during traversal of dirty regions we
-// should clear them to avoid encountering them during next dirty regions
-// iteration.
-static void MigrateObject(Address dst,
-                          Address src,
-                          int size,
-                          bool to_old_space) {
-  if (to_old_space) {
-    Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(dst, src, size);
-  } else {
-    Heap::CopyBlock(dst, src, size);
-  }
+// to encounter pointers to dead objects during traversal of remembered set for
+// map space because remembered set bits corresponding to dead maps are cleared
+// later during map space sweeping.
+static void MigrateObject(Address dst, Address src, int size) {
+  Heap::CopyBlock(reinterpret_cast<Object**>(dst),
+                  reinterpret_cast<Object**>(src),
+                  size);
 
   Memory::Address_at(src) = dst;
 }
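Note: both the old and the new MigrateObject rely on the forwarding convention described in the comment above: after copying, the first word of the source object holds the destination address, and NULL marks a dead object. A standalone sketch of that convention (the types are stand-ins for V8's Address):

    #include <cstring>

    typedef unsigned char* Address;

    // Copy an object's payload and leave its new location in the old copy's
    // first word, mirroring MigrateObject above.
    void MigrateSketch(Address dst, Address src, int size) {
      std::memcpy(dst, src, size);
      *reinterpret_cast<Address*>(src) = dst;  // forwarding pointer
    }

    // Any stale pointer can later be redirected through that word; a stored
    // NULL means the source object did not survive the collection.
    Address Forwarded(Address src) {
      return *reinterpret_cast<Address*>(src);
    }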
 
 
 // Visitor for updating pointers from live objects in old spaces to new space.
 // It does not expect to encounter pointers to dead objects.
 class PointersToNewGenUpdatingVisitor: public ObjectVisitor {
  public:
   void VisitPointer(Object** p) {
(...skipping 26 matching lines...)
     HeapObject* obj = HeapObject::cast(*p);
     Address old_addr = obj->address();
 
     if (Heap::new_space()->Contains(obj)) {
       ASSERT(Heap::InFromSpace(*p));
       *p = HeapObject::FromAddress(Memory::Address_at(old_addr));
     }
   }
 };
 
-
 // Visitor for updating pointers from live objects in old spaces to new space.
 // It can encounter pointers to dead objects in new space when traversing map
 // space (see comment for MigrateObject).
 static void UpdatePointerToNewGen(HeapObject** p) {
   if (!(*p)->IsHeapObject()) return;
 
   Address old_addr = (*p)->address();
   ASSERT(Heap::InFromSpace(*p));
 
   Address new_addr = Memory::Address_at(old_addr);
 
-  if (new_addr == NULL) {
-    // We encountered pointer to a dead object. Clear it so we will
-    // not visit it again during next iteration of dirty regions.
-    *p = NULL;
-  } else {
-    *p = HeapObject::FromAddress(new_addr);
-  }
+  // Object pointed by *p is dead. Update is not required.
+  if (new_addr == NULL) return;
+
+  *p = HeapObject::FromAddress(new_addr);
 }
 
 
 static String* UpdateNewSpaceReferenceInExternalStringTableEntry(Object **p) {
   Address old_addr = HeapObject::cast(*p)->address();
   Address new_addr = Memory::Address_at(old_addr);
   return String::cast(HeapObject::FromAddress(new_addr));
 }
 
 
 static bool TryPromoteObject(HeapObject* object, int object_size) {
   Object* result;
 
   if (object_size > Heap::MaxObjectSizeInPagedSpace()) {
     result = Heap::lo_space()->AllocateRawFixedArray(object_size);
     if (!result->IsFailure()) {
       HeapObject* target = HeapObject::cast(result);
-      MigrateObject(target->address(), object->address(), object_size, true);
+      MigrateObject(target->address(), object->address(), object_size);
+      Heap::UpdateRSet(target);
       MarkCompactCollector::tracer()->
           increment_promoted_objects_size(object_size);
       return true;
     }
   } else {
     OldSpace* target_space = Heap::TargetSpace(object);
 
     ASSERT(target_space == Heap::old_pointer_space() ||
            target_space == Heap::old_data_space());
     result = target_space->AllocateRaw(object_size);
     if (!result->IsFailure()) {
       HeapObject* target = HeapObject::cast(result);
-      MigrateObject(target->address(),
-                    object->address(),
-                    object_size,
-                    target_space == Heap::old_pointer_space());
+      MigrateObject(target->address(), object->address(), object_size);
+      if (target_space == Heap::old_pointer_space()) {
+        Heap::UpdateRSet(target);
+      }
       MarkCompactCollector::tracer()->
           increment_promoted_objects_size(object_size);
       return true;
     }
   }
 
   return false;
 }
 
 
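Note: TryPromoteObject chooses among three destinations, and on the restored code path only pointer-bearing destinations get a remembered-set refresh (old data space cannot hold pointers into new space). A condensed model of that decision, with invented names:

    #include <cstddef>

    enum PromotionTarget { LO_SPACE, OLD_POINTER_SPACE, OLD_DATA_SPACE };

    // Oversized objects go to large-object space; otherwise the choice
    // between the two old spaces depends on whether the object's body can
    // contain heap pointers. needs_rset_update mirrors the UpdateRSet calls
    // in TryPromoteObject above.
    PromotionTarget ChooseTarget(size_t size, size_t max_paged_size,
                                 bool body_may_contain_pointers,
                                 bool* needs_rset_update) {
      if (size > max_paged_size) {
        *needs_rset_update = true;
        return LO_SPACE;
      }
      *needs_rset_update = body_may_contain_pointers;
      return body_may_contain_pointers ? OLD_POINTER_SPACE : OLD_DATA_SPACE;
    }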
(...skipping 21 matching lines...)
       MarkCompactCollector::tracer()->decrement_marked_count();
 
       size = object->Size();
       survivors_size += size;
 
       // Aggressively promote young survivors to the old space.
       if (TryPromoteObject(object, size)) {
         continue;
       }
 
-      // Promotion failed. Just migrate object to another semispace.
+      // Promotion either failed or not required.
+      // Copy the content of the object.
       Object* target = space->AllocateRaw(size);
 
       // Allocation cannot fail at this point: semispaces are of equal size.
       ASSERT(!target->IsFailure());
 
-      MigrateObject(HeapObject::cast(target)->address(),
-                    current,
-                    size,
-                    false);
+      MigrateObject(HeapObject::cast(target)->address(), current, size);
     } else {
       size = object->Size();
       Memory::Address_at(current) = NULL;
     }
   }
 
   // Second pass: find pointers to new space and update them.
   PointersToNewGenUpdatingVisitor updating_visitor;
 
   // Update pointers in to space.
   HeapObject* object;
   for (Address current = space->bottom();
        current < space->top();
        current += object->Size()) {
     object = HeapObject::FromAddress(current);
 
     object->IterateBody(object->map()->instance_type(),
                         object->Size(),
                         &updating_visitor);
   }
 
   // Update roots.
   Heap::IterateRoots(&updating_visitor, VISIT_ALL_IN_SCAVENGE);
 
   // Update pointers in old spaces.
-  Heap::IterateDirtyRegions(Heap::old_pointer_space(),
-                            &Heap::IteratePointersInDirtyRegion,
-                            &UpdatePointerToNewGen,
-                            Heap::WATERMARK_SHOULD_BE_VALID);
-
-  Heap::lo_space()->IterateDirtyRegions(&UpdatePointerToNewGen);
+  Heap::IterateRSet(Heap::old_pointer_space(), &UpdatePointerToNewGen);
+  Heap::IterateRSet(Heap::map_space(), &UpdatePointerToNewGen);
+  Heap::lo_space()->IterateRSet(&UpdatePointerToNewGen);
 
   // Update pointers from cells.
   HeapObjectIterator cell_iterator(Heap::cell_space());
   for (HeapObject* cell = cell_iterator.next();
        cell != NULL;
        cell = cell_iterator.next()) {
     if (cell->IsJSGlobalPropertyCell()) {
       Address value_address =
           reinterpret_cast<Address>(cell) +
           (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
(...skipping 45 matching lines...)
 
     for (Address current = p->ObjectAreaStart();
          current < p->AllocationTop();
          current += object->Size()) {
       object = HeapObject::FromAddress(current);
       if (object->IsMarked()) {
         object->ClearMark();
         MarkCompactCollector::tracer()->decrement_marked_count();
 
         if (!is_previous_alive) {  // Transition from free to live.
-          dealloc(free_start,
-                  static_cast<int>(current - free_start),
-                  true,
-                  false);
+          dealloc(free_start, static_cast<int>(current - free_start), true);
           is_previous_alive = true;
         }
       } else {
         MarkCompactCollector::ReportDeleteIfNeeded(object);
         if (is_previous_alive) {  // Transition from live to free.
           free_start = current;
           is_previous_alive = false;
         }
       }
       // The object is now unmarked for the call to Size() at the top of the
       // loop.
     }
 
     bool page_is_empty = (p->ObjectAreaStart() == p->AllocationTop())
         || (!is_previous_alive && free_start == p->ObjectAreaStart());
 
     if (page_is_empty) {
       // This page is empty. Check whether we are in the middle of
       // sequence of empty pages and start one if not.
       if (!first_empty_page->is_valid()) {
         first_empty_page = p;
         prec_first_empty_page = prev;
       }
 
       if (!is_previous_alive) {
         // There are dead objects on this page. Update space accounting stats
         // without putting anything into free list.
         int size_in_bytes = static_cast<int>(p->AllocationTop() - free_start);
         if (size_in_bytes > 0) {
-          dealloc(free_start, size_in_bytes, false, true);
+          dealloc(free_start, size_in_bytes, false);
         }
       }
     } else {
       // This page is not empty. Sequence of empty pages ended on the previous
       // one.
       if (first_empty_page->is_valid()) {
         space->FreePages(prec_first_empty_page, prev);
         prec_first_empty_page = first_empty_page = Page::FromAddress(NULL);
       }
 
       // If there is a free ending area on one of the previous pages we have
       // deallocate that area and put it on the free list.
       if (last_free_size > 0) {
-        Page::FromAddress(last_free_start)->
-            SetAllocationWatermark(last_free_start);
-        dealloc(last_free_start, last_free_size, true, true);
+        dealloc(last_free_start, last_free_size, true);
         last_free_start = NULL;
         last_free_size = 0;
       }
 
       // If the last region of this page was not live we remember it.
       if (!is_previous_alive) {
         ASSERT(last_free_size == 0);
         last_free_size = static_cast<int>(p->AllocationTop() - free_start);
         last_free_start = free_start;
       }
(...skipping 10 matching lines...)
     // to the beginning of first empty page.
     ASSERT(prev == space->AllocationTopPage());
 
     new_allocation_top = first_empty_page->ObjectAreaStart();
   }
 
   if (last_free_size > 0) {
     // There was a free ending area on the previous page.
     // Deallocate it without putting it into freelist and move allocation
     // top to the beginning of this free area.
-    dealloc(last_free_start, last_free_size, false, true);
+    dealloc(last_free_start, last_free_size, false);
     new_allocation_top = last_free_start;
   }
 
   if (new_allocation_top != NULL) {
 #ifdef DEBUG
     Page* new_allocation_top_page = Page::FromAllocationTop(new_allocation_top);
     if (!first_empty_page->is_valid()) {
       ASSERT(new_allocation_top_page == space->AllocationTopPage());
     } else if (last_free_size > 0) {
       ASSERT(new_allocation_top_page == prec_first_empty_page);
     } else {
       ASSERT(new_allocation_top_page == first_empty_page);
     }
 #endif
 
     space->SetTop(new_allocation_top);
   }
 }
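Note: the sweep above is a run-length walk: it clears mark bits, coalesces consecutive dead objects into one free region, and hands each region to `dealloc` when a live object (or the page's allocation top) closes it. A standalone model of the run tracking, with indices standing in for addresses and a bool array for mark bits:

    #include <cstdio>

    void SweepRuns(const bool* live, int count) {
      bool is_previous_alive = true;  // the scan starts outside a free run
      int free_start = 0;
      for (int i = 0; i < count; ++i) {
        if (live[i]) {
          if (!is_previous_alive) {   // transition from free to live
            std::printf("free run: [%d, %d)\n", free_start, i);
            is_previous_alive = true;
          }
        } else if (is_previous_alive) {  // transition from live to free
          free_start = i;
          is_previous_alive = false;
        }
      }
      if (!is_previous_alive) {       // trailing free run reaches the top
        std::printf("trailing free run: [%d, %d)\n", free_start, count);
      }
    }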
 
 
 void MarkCompactCollector::DeallocateOldPointerBlock(Address start,
                                                      int size_in_bytes,
-                                                     bool add_to_freelist,
-                                                     bool last_on_page) {
+                                                     bool add_to_freelist) {
+  Heap::ClearRSetRange(start, size_in_bytes);
   Heap::old_pointer_space()->Free(start, size_in_bytes, add_to_freelist);
 }
 
 
 void MarkCompactCollector::DeallocateOldDataBlock(Address start,
                                                   int size_in_bytes,
-                                                  bool add_to_freelist,
-                                                  bool last_on_page) {
+                                                  bool add_to_freelist) {
   Heap::old_data_space()->Free(start, size_in_bytes, add_to_freelist);
 }
 
 
 void MarkCompactCollector::DeallocateCodeBlock(Address start,
                                                int size_in_bytes,
-                                               bool add_to_freelist,
-                                               bool last_on_page) {
+                                               bool add_to_freelist) {
   Heap::code_space()->Free(start, size_in_bytes, add_to_freelist);
 }
 
 
 void MarkCompactCollector::DeallocateMapBlock(Address start,
                                               int size_in_bytes,
-                                              bool add_to_freelist,
-                                              bool last_on_page) {
+                                              bool add_to_freelist) {
   // Objects in map space are assumed to have size Map::kSize and a
   // valid map in their first word. Thus, we break the free block up into
   // chunks and free them separately.
   ASSERT(size_in_bytes % Map::kSize == 0);
+  Heap::ClearRSetRange(start, size_in_bytes);
   Address end = start + size_in_bytes;
   for (Address a = start; a < end; a += Map::kSize) {
     Heap::map_space()->Free(a, add_to_freelist);
   }
 }
 
 
 void MarkCompactCollector::DeallocateCellBlock(Address start,
                                                int size_in_bytes,
-                                               bool add_to_freelist,
-                                               bool last_on_page) {
+                                               bool add_to_freelist) {
   // Free-list elements in cell space are assumed to have a fixed size.
   // We break the free block into chunks and add them to the free list
   // individually.
   int size = Heap::cell_space()->object_size_in_bytes();
   ASSERT(size_in_bytes % size == 0);
+  Heap::ClearRSetRange(start, size_in_bytes);
   Address end = start + size_in_bytes;
   for (Address a = start; a < end; a += size) {
     Heap::cell_space()->Free(a, add_to_freelist);
   }
 }
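Note: map and cell space keep fixed-size free-list entries, so a freed block is split into object-sized cells, and on the restored code path the block's remembered-set bits are cleared first so stale bits cannot survive the free. A sketch, with function pointers standing in for the space's free-list insertion and for Heap::ClearRSetRange:

    #include <cassert>

    void FreeFixedSizeBlock(char* start, int size_in_bytes, int cell_size,
                            void (*clear_rset)(char*, int),
                            void (*free_cell)(char*)) {
      // The block must be an exact multiple of the cell size.
      assert(size_in_bytes % cell_size == 0);
      clear_rset(start, size_in_bytes);  // drop stale remembered-set bits
      for (char* a = start; a < start + size_in_bytes; a += cell_size) {
        free_cell(a);
      }
    }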
 
 
 void MarkCompactCollector::EncodeForwardingAddresses() {
   ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
   // Objects in the active semispace of the young generation may be
(...skipping 78 matching lines...)
 #ifdef DEBUG
     CheckNoMapsToEvacuate();
 #endif
   }
 
   void UpdateMapPointersInRoots() {
     Heap::IterateRoots(&map_updating_visitor_, VISIT_ONLY_STRONG);
     GlobalHandles::IterateWeakRoots(&map_updating_visitor_);
   }
 
+  void FinishMapSpace() {
+    // Iterate through to space and finish move.
+    MapIterator it;
+    HeapObject* o = it.next();
+    for (; o != first_map_to_evacuate_; o = it.next()) {
+      ASSERT(o != NULL);
+      Map* map = reinterpret_cast<Map*>(o);
+      ASSERT(!map->IsMarked());
+      ASSERT(!map->IsOverflowed());
+      ASSERT(map->IsMap());
+      Heap::UpdateRSet(map);
+    }
+  }
+
   void UpdateMapPointersInPagedSpace(PagedSpace* space) {
     ASSERT(space != Heap::map_space());
 
     PageIterator it(space, PageIterator::PAGES_IN_USE);
     while (it.has_next()) {
       Page* p = it.next();
       UpdateMapPointersInRange(p->ObjectAreaStart(), p->AllocationTop());
     }
   }
 
(...skipping 72 matching lines...)
     ASSERT(map->IsMap());
     return map;
   }
 
   static void EvacuateMap(Map* vacant_map, Map* map_to_evacuate) {
     ASSERT(FreeListNode::IsFreeListNode(vacant_map));
     ASSERT(map_to_evacuate->IsMap());
 
     ASSERT(Map::kSize % 4 == 0);
 
-    Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(vacant_map->address(),
-                                                  map_to_evacuate->address(),
-                                                  Map::kSize);
+    Heap::CopyBlock(reinterpret_cast<Object**>(vacant_map->address()),
+                    reinterpret_cast<Object**>(map_to_evacuate->address()),
+                    Map::kSize);
 
     ASSERT(vacant_map->IsMap());  // Due to memcpy above.
 
     MapWord forwarding_map_word = MapWord::FromMap(vacant_map);
     forwarding_map_word.SetOverflow();
     map_to_evacuate->set_map_word(forwarding_map_word);
 
     ASSERT(map_to_evacuate->map_word().IsOverflowed());
     ASSERT(GetForwardedMap(map_to_evacuate->map_word()) == vacant_map);
   }
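Note: EvacuateMap copies a live map into a vacant cell earlier in map space and records the move by storing the vacant cell's address in the evacuated map's map word with the overflow bit set. A sketch of that style of encoding; the tag bit position here is invented for the sketch and is not V8's actual MapWord layout:

    #include <cstdint>

    const uintptr_t kForwardedBit = uintptr_t(1) << 1;  // assumed spare bit

    uintptr_t EncodeForwarding(uintptr_t vacant_map_addr) {
      return vacant_map_addr | kForwardedBit;
    }

    bool IsForwarded(uintptr_t map_word) {
      return (map_word & kForwardedBit) != 0;
    }

    uintptr_t ForwardedAddress(uintptr_t map_word) {
      return map_word & ~kForwardedBit;
    }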
(...skipping 64 matching lines...)
   // bits and free the nonlive blocks (for old and map spaces). We sweep
   // the map space last because freeing non-live maps overwrites them and
   // the other spaces rely on possibly non-live maps to get the sizes for
   // non-live objects.
   SweepSpace(Heap::old_pointer_space(), &DeallocateOldPointerBlock);
   SweepSpace(Heap::old_data_space(), &DeallocateOldDataBlock);
   SweepSpace(Heap::code_space(), &DeallocateCodeBlock);
   SweepSpace(Heap::cell_space(), &DeallocateCellBlock);
   SweepNewSpace(Heap::new_space());
   SweepSpace(Heap::map_space(), &DeallocateMapBlock);
-
-  Heap::IterateDirtyRegions(Heap::map_space(),
-                            &Heap::IteratePointersInDirtyMapsRegion,
-                            &UpdatePointerToNewGen,
-                            Heap::WATERMARK_SHOULD_BE_VALID);
-
   int live_maps_size = Heap::map_space()->Size();
   int live_maps = live_maps_size / Map::kSize;
   ASSERT(live_map_objects_size_ == live_maps_size);
 
   if (Heap::map_space()->NeedsCompaction(live_maps)) {
     MapCompact map_compact(live_maps);
 
     map_compact.CompactMaps();
     map_compact.UpdateMapPointersInRoots();
 
+    map_compact.FinishMapSpace();
     PagedSpaces spaces;
     for (PagedSpace* space = spaces.next();
          space != NULL; space = spaces.next()) {
       if (space == Heap::map_space()) continue;
       map_compact.UpdateMapPointersInPagedSpace(space);
     }
     map_compact.UpdateMapPointersInNewSpace();
     map_compact.UpdateMapPointersInLargeObjectSpace();
 
     map_compact.Finish();
(...skipping 252 matching lines...)
   Address obj_addr = obj->address();
 
   // Find the first live object's forwarding address.
   Page* p = Page::FromAddress(obj_addr);
   Address first_forwarded = p->mc_first_forwarded;
 
   // Page start address of forwarded address.
   Page* forwarded_page = Page::FromAddress(first_forwarded);
   int forwarded_offset = forwarded_page->Offset(first_forwarded);
 
-  // Find end of allocation in the page of first_forwarded.
-  int mc_top_offset = forwarded_page->AllocationWatermarkOffset();
+  // Find end of allocation of in the page of first_forwarded.
+  Address mc_top = forwarded_page->mc_relocation_top;
+  int mc_top_offset = forwarded_page->Offset(mc_top);
 
   // Check if current object's forward pointer is in the same page
   // as the first live object's forwarding pointer
   if (forwarded_offset + offset < mc_top_offset) {
     // In the same page.
     return first_forwarded + offset;
   }
 
   // Must be in the next page, NOTE: this may cross chunks.
   Page* next_page = forwarded_page->next_page();
   ASSERT(next_page->is_valid());
 
   offset -= (mc_top_offset - forwarded_offset);
   offset += Page::kObjectStartOffset;
 
   ASSERT_PAGE_OFFSET(offset);
-  ASSERT(next_page->OffsetToAddress(offset) < next_page->AllocationTop());
+  ASSERT(next_page->OffsetToAddress(offset) < next_page->mc_relocation_top);
 
   return next_page->OffsetToAddress(offset);
 }
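Note: GetForwardingAddressInOldSpace reconstructs an object's destination as its page's first forwarded address plus the object's encoded offset, spilling the remainder onto the next page when the sum passes the destination page's relocation top. The arithmetic in standalone form (the struct fields are stand-ins for the Page fields used above):

    #include <cstdint>

    struct PageModel {
      uintptr_t first_forwarded;   // destination of the page's first live object
      uintptr_t relocation_top;    // end of destination allocation on that page
      uintptr_t next_object_area;  // object area start of the following page
    };

    uintptr_t ForwardingAddress(const PageModel& p, uintptr_t offset) {
      uintptr_t candidate = p.first_forwarded + offset;
      if (candidate < p.relocation_top) return candidate;  // same page
      return p.next_object_area + (candidate - p.relocation_top);
    }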
 
 
 // -------------------------------------------------------------------------
 // Phase 4: Relocate objects
 
 void MarkCompactCollector::RelocateObjects() {
 #ifdef DEBUG
(...skipping 24 matching lines...)
   ASSERT(live_maps_size == live_map_objects_size_);
   ASSERT(live_data_olds_size == live_old_data_objects_size_);
   ASSERT(live_pointer_olds_size == live_old_pointer_objects_size_);
   ASSERT(live_codes_size == live_code_objects_size_);
   ASSERT(live_cells_size == live_cell_objects_size_);
   ASSERT(live_news_size == live_young_objects_size_);
 
   // Flip from and to spaces
   Heap::new_space()->Flip();
 
-  Heap::new_space()->MCCommitRelocationInfo();
-
   // Set age_mark to bottom in to space
   Address mark = Heap::new_space()->bottom();
   Heap::new_space()->set_age_mark(mark);
 
+  Heap::new_space()->MCCommitRelocationInfo();
+#ifdef DEBUG
+  // It is safe to write to the remembered sets as remembered sets on a
+  // page-by-page basis after committing the m-c forwarding pointer.
+  Page::set_rset_state(Page::IN_USE);
+#endif
   PagedSpaces spaces;
   for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
     space->MCCommitRelocationInfo();
 
   Heap::CheckNewSpaceExpansionCriteria();
   Heap::IncrementYoungSurvivorsCounter(live_news_size);
 }
 
 
 int MarkCompactCollector::RelocateMapObject(HeapObject* obj) {
   // Recover map pointer.
   MapWord encoding = obj->map_word();
   Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
   ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr)));
 
   // Get forwarding address before resetting map pointer
   Address new_addr = GetForwardingAddressInOldSpace(obj);
 
   // Reset map pointer. The meta map object may not be copied yet so
   // Map::cast does not yet work.
   obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr)));
 
   Address old_addr = obj->address();
 
   if (new_addr != old_addr) {
     // Move contents.
-    Heap::MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
-                                                  old_addr,
-                                                  Map::kSize);
+    Heap::MoveBlock(reinterpret_cast<Object**>(new_addr),
+                    reinterpret_cast<Object**>(old_addr),
+                    Map::kSize);
   }
 
 #ifdef DEBUG
   if (FLAG_gc_verbose) {
     PrintF("relocate %p -> %p\n", old_addr, new_addr);
   }
 #endif
 
   return Map::kSize;
 }
(...skipping 36 matching lines...)
   // Get forwarding address before resetting map pointer.
   Address new_addr = GetForwardingAddressInOldSpace(obj);
 
   // Reset the map pointer.
   int obj_size = RestoreMap(obj, space, new_addr, map_addr);
 
   Address old_addr = obj->address();
 
   if (new_addr != old_addr) {
     // Move contents.
-    if (space == Heap::old_data_space()) {
-      Heap::MoveBlock(new_addr, old_addr, obj_size);
-    } else {
-      Heap::MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
-                                                    old_addr,
-                                                    obj_size);
-    }
+    Heap::MoveBlock(reinterpret_cast<Object**>(new_addr),
+                    reinterpret_cast<Object**>(old_addr),
+                    obj_size);
   }
 
   ASSERT(!HeapObject::FromAddress(new_addr)->IsCode());
 
   HeapObject* copied_to = HeapObject::FromAddress(new_addr);
   if (copied_to->IsJSFunction()) {
     PROFILE(FunctionMoveEvent(old_addr, new_addr));
   }
 
   return obj_size;
(...skipping 24 matching lines...)
   // Get forwarding address before resetting map pointer
   Address new_addr = GetForwardingAddressInOldSpace(obj);
 
   // Reset the map pointer.
   int obj_size = RestoreMap(obj, Heap::code_space(), new_addr, map_addr);
 
   Address old_addr = obj->address();
 
   if (new_addr != old_addr) {
     // Move contents.
-    Heap::MoveBlock(new_addr, old_addr, obj_size);
+    Heap::MoveBlock(reinterpret_cast<Object**>(new_addr),
+                    reinterpret_cast<Object**>(old_addr),
+                    obj_size);
   }
 
   HeapObject* copied_to = HeapObject::FromAddress(new_addr);
   if (copied_to->IsCode()) {
     // May also update inline cache target.
     Code::cast(copied_to)->Relocate(new_addr - old_addr);
     // Notify the logger that compiled code has moved.
     PROFILE(CodeMoveEvent(old_addr, new_addr));
   }
 
(...skipping 15 matching lines...)
   if (Heap::new_space()->FromSpaceContains(new_addr)) {
     ASSERT(Heap::new_space()->FromSpaceOffsetForAddress(new_addr) <=
            Heap::new_space()->ToSpaceOffsetForAddress(old_addr));
   } else {
     ASSERT(Heap::TargetSpace(obj) == Heap::old_pointer_space() ||
            Heap::TargetSpace(obj) == Heap::old_data_space());
   }
 #endif
 
   // New and old addresses cannot overlap.
-  if (Heap::InNewSpace(HeapObject::FromAddress(new_addr))) {
-    Heap::CopyBlock(new_addr, old_addr, obj_size);
-  } else {
-    Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(new_addr,
-                                                  old_addr,
-                                                  obj_size);
-  }
+  Heap::CopyBlock(reinterpret_cast<Object**>(new_addr),
+                  reinterpret_cast<Object**>(old_addr),
+                  obj_size);
 
 #ifdef DEBUG
   if (FLAG_gc_verbose) {
     PrintF("relocate %p -> %p\n", old_addr, new_addr);
   }
 #endif
 
   HeapObject* copied_to = HeapObject::FromAddress(new_addr);
   if (copied_to->IsJSFunction()) {
     PROFILE(FunctionMoveEvent(old_addr, new_addr));
   }
 
   return obj_size;
 }
 
 
+// -------------------------------------------------------------------------
+// Phase 5: rebuild remembered sets
+
+void MarkCompactCollector::RebuildRSets() {
+#ifdef DEBUG
+  ASSERT(state_ == RELOCATE_OBJECTS);
+  state_ = REBUILD_RSETS;
+#endif
+  Heap::RebuildRSets();
+}
+
+
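Note: after relocation every pointer-bearing slot may have moved, so the restored Phase 5 recomputes remembered sets from scratch rather than patching them incrementally. Conceptually the rebuild is a full rescan; a sketch with a predicate standing in for Heap::InNewSpace:

    #include <cstdint>
    #include <vector>

    // Re-derive the remembered set by scanning every slot of the relocated
    // old-space objects and keeping those that point into new space.
    std::vector<uintptr_t*> RebuildRememberedSet(
        const std::vector<uintptr_t*>& old_space_slots,
        bool (*in_new_space)(uintptr_t value)) {
      std::vector<uintptr_t*> remembered;
      for (uintptr_t* slot : old_space_slots) {
        if (in_new_space(*slot)) remembered.push_back(slot);
      }
      return remembered;
    }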
 void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
   if (obj->IsCode()) {
     PROFILE(CodeDeleteEvent(obj->address()));
   } else if (obj->IsJSFunction()) {
     PROFILE(FunctionDeleteEvent(obj->address()));
   }
 #endif
 }
 
 } }  // namespace v8::internal