Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(4)

Side by Side Diff: src/mark-compact.cc

Issue 2101002: Cardmarking writebarrier. (Closed)
Patch Set: fixed review comments Created 10 years, 7 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « src/mark-compact.h ('k') | src/objects.h » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2006-2008 the V8 project authors. All rights reserved. 1 // Copyright 2006-2008 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 65 matching lines...) Expand 10 before | Expand all | Expand 10 after
76 if (FLAG_collect_maps) ClearNonLiveTransitions(); 76 if (FLAG_collect_maps) ClearNonLiveTransitions();
77 77
78 SweepLargeObjectSpace(); 78 SweepLargeObjectSpace();
79 79
80 if (IsCompacting()) { 80 if (IsCompacting()) {
81 EncodeForwardingAddresses(); 81 EncodeForwardingAddresses();
82 82
83 UpdatePointers(); 83 UpdatePointers();
84 84
85 RelocateObjects(); 85 RelocateObjects();
86
87 RebuildRSets();
88
89 } else { 86 } else {
90 SweepSpaces(); 87 SweepSpaces();
91 } 88 }
92 89
93 Finish(); 90 Finish();
94 91
95 // Save the count of marked objects remaining after the collection and 92 // Save the count of marked objects remaining after the collection and
96 // null out the GC tracer. 93 // null out the GC tracer.
97 previous_marked_count_ = tracer_->marked_count(); 94 previous_marked_count_ = tracer_->marked_count();
98 ASSERT(previous_marked_count_ == 0); 95 ASSERT(previous_marked_count_ == 0);
(...skipping 14 matching lines...) Expand all
113 110
114 compacting_collection_ = 111 compacting_collection_ =
115 FLAG_always_compact || force_compaction_ || compact_on_next_gc_; 112 FLAG_always_compact || force_compaction_ || compact_on_next_gc_;
116 compact_on_next_gc_ = false; 113 compact_on_next_gc_ = false;
117 114
118 if (FLAG_never_compact) compacting_collection_ = false; 115 if (FLAG_never_compact) compacting_collection_ = false;
119 if (!Heap::map_space()->MapPointersEncodable()) 116 if (!Heap::map_space()->MapPointersEncodable())
120 compacting_collection_ = false; 117 compacting_collection_ = false;
121 if (FLAG_collect_maps) CreateBackPointers(); 118 if (FLAG_collect_maps) CreateBackPointers();
122 119
123 #ifdef DEBUG
124 if (compacting_collection_) {
125 // We will write bookkeeping information to the remembered set area
126 // starting now.
127 Page::set_rset_state(Page::NOT_IN_USE);
128 }
129 #endif
130
131 PagedSpaces spaces; 120 PagedSpaces spaces;
132 for (PagedSpace* space = spaces.next(); 121 for (PagedSpace* space = spaces.next();
133 space != NULL; space = spaces.next()) { 122 space != NULL; space = spaces.next()) {
134 space->PrepareForMarkCompact(compacting_collection_); 123 space->PrepareForMarkCompact(compacting_collection_);
135 } 124 }
136 125
137 #ifdef DEBUG 126 #ifdef DEBUG
138 live_bytes_ = 0; 127 live_bytes_ = 0;
139 live_young_objects_size_ = 0; 128 live_young_objects_size_ = 0;
140 live_old_pointer_objects_size_ = 0; 129 live_old_pointer_objects_size_ = 0;
141 live_old_data_objects_size_ = 0; 130 live_old_data_objects_size_ = 0;
142 live_code_objects_size_ = 0; 131 live_code_objects_size_ = 0;
143 live_map_objects_size_ = 0; 132 live_map_objects_size_ = 0;
144 live_cell_objects_size_ = 0; 133 live_cell_objects_size_ = 0;
145 live_lo_objects_size_ = 0; 134 live_lo_objects_size_ = 0;
146 #endif 135 #endif
147 } 136 }
148 137
149 138
150 void MarkCompactCollector::Finish() { 139 void MarkCompactCollector::Finish() {
151 #ifdef DEBUG 140 #ifdef DEBUG
152 ASSERT(state_ == SWEEP_SPACES || state_ == REBUILD_RSETS); 141 ASSERT(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
153 state_ = IDLE; 142 state_ = IDLE;
154 #endif 143 #endif
155 // The stub cache is not traversed during GC; clear the cache to 144 // The stub cache is not traversed during GC; clear the cache to
156 // force lazy re-initialization of it. This must be done after the 145 // force lazy re-initialization of it. This must be done after the
157 // GC, because it relies on the new address of certain old space 146 // GC, because it relies on the new address of certain old space
158 // objects (empty string, illegal builtin). 147 // objects (empty string, illegal builtin).
159 StubCache::Clear(); 148 StubCache::Clear();
160 149
161 ExternalStringTable::CleanUp(); 150 ExternalStringTable::CleanUp();
162 151
(...skipping 73 matching lines...) Expand 10 before | Expand all | Expand 10 after
236 map_word.ClearMark(); 225 map_word.ClearMark();
237 InstanceType type = map_word.ToMap()->instance_type(); 226 InstanceType type = map_word.ToMap()->instance_type();
238 if ((type & kShortcutTypeMask) != kShortcutTypeTag) return object; 227 if ((type & kShortcutTypeMask) != kShortcutTypeTag) return object;
239 228
240 Object* second = reinterpret_cast<ConsString*>(object)->unchecked_second(); 229 Object* second = reinterpret_cast<ConsString*>(object)->unchecked_second();
241 if (second != Heap::raw_unchecked_empty_string()) { 230 if (second != Heap::raw_unchecked_empty_string()) {
242 return object; 231 return object;
243 } 232 }
244 233
245 // Since we don't have the object's start, it is impossible to update the 234 // Since we don't have the object's start, it is impossible to update the
246 // remembered set. Therefore, we only replace the string with its left 235 // page dirty marks. Therefore, we only replace the string with its left
247 // substring when the remembered set does not change. 236 // substring when page dirty marks do not change.
248 Object* first = reinterpret_cast<ConsString*>(object)->unchecked_first(); 237 Object* first = reinterpret_cast<ConsString*>(object)->unchecked_first();
249 if (!Heap::InNewSpace(object) && Heap::InNewSpace(first)) return object; 238 if (!Heap::InNewSpace(object) && Heap::InNewSpace(first)) return object;
250 239
251 *p = first; 240 *p = first;
252 return HeapObject::cast(first); 241 return HeapObject::cast(first);
253 } 242 }
254 243
255 244
256 // Helper class for marking pointers in HeapObjects. 245 // Helper class for marking pointers in HeapObjects.
257 class MarkingVisitor : public ObjectVisitor { 246 class MarkingVisitor : public ObjectVisitor {
(...skipping 509 matching lines...) Expand 10 before | Expand all | Expand 10 after
767 void MarkCompactCollector::SweepLargeObjectSpace() { 756 void MarkCompactCollector::SweepLargeObjectSpace() {
768 #ifdef DEBUG 757 #ifdef DEBUG
769 ASSERT(state_ == MARK_LIVE_OBJECTS); 758 ASSERT(state_ == MARK_LIVE_OBJECTS);
770 state_ = 759 state_ =
771 compacting_collection_ ? ENCODE_FORWARDING_ADDRESSES : SWEEP_SPACES; 760 compacting_collection_ ? ENCODE_FORWARDING_ADDRESSES : SWEEP_SPACES;
772 #endif 761 #endif
773 // Deallocate unmarked objects and clear marked bits for marked objects. 762 // Deallocate unmarked objects and clear marked bits for marked objects.
774 Heap::lo_space()->FreeUnmarkedObjects(); 763 Heap::lo_space()->FreeUnmarkedObjects();
775 } 764 }
776 765
766
777 // Safe to use during marking phase only. 767 // Safe to use during marking phase only.
778 bool MarkCompactCollector::SafeIsMap(HeapObject* object) { 768 bool MarkCompactCollector::SafeIsMap(HeapObject* object) {
779 MapWord metamap = object->map_word(); 769 MapWord metamap = object->map_word();
780 metamap.ClearMark(); 770 metamap.ClearMark();
781 return metamap.ToMap()->instance_type() == MAP_TYPE; 771 return metamap.ToMap()->instance_type() == MAP_TYPE;
782 } 772 }
783 773
774
784 void MarkCompactCollector::ClearNonLiveTransitions() { 775 void MarkCompactCollector::ClearNonLiveTransitions() {
785 HeapObjectIterator map_iterator(Heap::map_space(), &CountMarkedCallback); 776 HeapObjectIterator map_iterator(Heap::map_space(), &CountMarkedCallback);
786 // Iterate over the map space, setting map transitions that go from 777 // Iterate over the map space, setting map transitions that go from
787 // a marked map to an unmarked map to null transitions. At the same time, 778 // a marked map to an unmarked map to null transitions. At the same time,
788 // set all the prototype fields of maps back to their original value, 779 // set all the prototype fields of maps back to their original value,
789 // dropping the back pointers temporarily stored in the prototype field. 780 // dropping the back pointers temporarily stored in the prototype field.
790 // Setting the prototype field requires following the linked list of 781 // Setting the prototype field requires following the linked list of
791 // back pointers, reversing them all at once. This allows us to find 782 // back pointers, reversing them all at once. This allows us to find
792 // those maps with map transitions that need to be nulled, and only 783 // those maps with map transitions that need to be nulled, and only
793 // scan the descriptor arrays of those maps, not all maps. 784 // scan the descriptor arrays of those maps, not all maps.
(...skipping 275 matching lines...) Expand 10 before | Expand all | Expand 10 after
1069 } 1060 }
1070 1061
1071 1062
1072 // We scavenge new space simultaneously with sweeping. This is done in two 1063 // We scavenge new space simultaneously with sweeping. This is done in two
1073 // passes. 1064 // passes.
1074 // The first pass migrates all alive objects from one semispace to another or 1065 // The first pass migrates all alive objects from one semispace to another or
1075 // promotes them to old space. Forwarding address is written directly into 1066 // promotes them to old space. Forwarding address is written directly into
1076 // first word of object without any encoding. If object is dead we are writing 1067 // first word of object without any encoding. If object is dead we are writing
1077 // NULL as a forwarding address. 1068 // NULL as a forwarding address.
1078 // The second pass updates pointers to new space in all spaces. It is possible 1069 // The second pass updates pointers to new space in all spaces. It is possible
1079 // to encounter pointers to dead objects during traversal of remembered set for 1070 // to encounter pointers to dead objects during traversal of dirty regions; we
1080 // map space because remembered set bits corresponding to dead maps are cleared 1071 // should clear them to avoid encountering them during the next dirty regions
1081 // later during map space sweeping. 1072 // iteration.
1082 static void MigrateObject(Address dst, Address src, int size) { 1073 static void MigrateObject(Address dst,
1083 Heap::CopyBlock(reinterpret_cast<Object**>(dst), 1074 Address src,
1084 reinterpret_cast<Object**>(src), 1075 int size,
1085 size); 1076 bool to_old_space) {
1077 if (to_old_space) {
1078 Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(dst, src, size);
1079 } else {
1080 Heap::CopyBlock(dst, src, size);
1081 }
1086 1082
1087 Memory::Address_at(src) = dst; 1083 Memory::Address_at(src) = dst;
1088 } 1084 }
1089 1085
1090 1086
1091 // Visitor for updating pointers from live objects in old spaces to new space. 1087 // Visitor for updating pointers from live objects in old spaces to new space.
1092 // It does not expect to encounter pointers to dead objects. 1088 // It does not expect to encounter pointers to dead objects.
1093 class PointersToNewGenUpdatingVisitor: public ObjectVisitor { 1089 class PointersToNewGenUpdatingVisitor: public ObjectVisitor {
1094 public: 1090 public:
1095 void VisitPointer(Object** p) { 1091 void VisitPointer(Object** p) {
(...skipping 26 matching lines...) Expand all
1122 HeapObject* obj = HeapObject::cast(*p); 1118 HeapObject* obj = HeapObject::cast(*p);
1123 Address old_addr = obj->address(); 1119 Address old_addr = obj->address();
1124 1120
1125 if (Heap::new_space()->Contains(obj)) { 1121 if (Heap::new_space()->Contains(obj)) {
1126 ASSERT(Heap::InFromSpace(*p)); 1122 ASSERT(Heap::InFromSpace(*p));
1127 *p = HeapObject::FromAddress(Memory::Address_at(old_addr)); 1123 *p = HeapObject::FromAddress(Memory::Address_at(old_addr));
1128 } 1124 }
1129 } 1125 }
1130 }; 1126 };
1131 1127
1128
1132 // Visitor for updating pointers from live objects in old spaces to new space. 1129 // Visitor for updating pointers from live objects in old spaces to new space.
1133 // It can encounter pointers to dead objects in new space when traversing map 1130 // It can encounter pointers to dead objects in new space when traversing map
1134 // space (see comment for MigrateObject). 1131 // space (see comment for MigrateObject).
1135 static void UpdatePointerToNewGen(HeapObject** p) { 1132 static void UpdatePointerToNewGen(HeapObject** p) {
1136 if (!(*p)->IsHeapObject()) return; 1133 if (!(*p)->IsHeapObject()) return;
1137 1134
1138 Address old_addr = (*p)->address(); 1135 Address old_addr = (*p)->address();
1139 ASSERT(Heap::InFromSpace(*p)); 1136 ASSERT(Heap::InFromSpace(*p));
1140 1137
1141 Address new_addr = Memory::Address_at(old_addr); 1138 Address new_addr = Memory::Address_at(old_addr);
1142 1139
1143 // Object pointed by *p is dead. Update is not required. 1140 if (new_addr == NULL) {
1144 if (new_addr == NULL) return; 1141 // We encountered pointer to a dead object. Clear it so we will
1145 1142 // not visit it again during next iteration of dirty regions.
1146 *p = HeapObject::FromAddress(new_addr); 1143 *p = NULL;
1144 } else {
1145 *p = HeapObject::FromAddress(new_addr);
1146 }
1147 } 1147 }
1148 1148
1149 1149
1150 static String* UpdateNewSpaceReferenceInExternalStringTableEntry(Object **p) { 1150 static String* UpdateNewSpaceReferenceInExternalStringTableEntry(Object **p) {
1151 Address old_addr = HeapObject::cast(*p)->address(); 1151 Address old_addr = HeapObject::cast(*p)->address();
1152 Address new_addr = Memory::Address_at(old_addr); 1152 Address new_addr = Memory::Address_at(old_addr);
1153 return String::cast(HeapObject::FromAddress(new_addr)); 1153 return String::cast(HeapObject::FromAddress(new_addr));
1154 } 1154 }
1155 1155
1156 1156
1157 static bool TryPromoteObject(HeapObject* object, int object_size) { 1157 static bool TryPromoteObject(HeapObject* object, int object_size) {
1158 Object* result; 1158 Object* result;
1159 1159
1160 if (object_size > Heap::MaxObjectSizeInPagedSpace()) { 1160 if (object_size > Heap::MaxObjectSizeInPagedSpace()) {
1161 result = Heap::lo_space()->AllocateRawFixedArray(object_size); 1161 result = Heap::lo_space()->AllocateRawFixedArray(object_size);
1162 if (!result->IsFailure()) { 1162 if (!result->IsFailure()) {
1163 HeapObject* target = HeapObject::cast(result); 1163 HeapObject* target = HeapObject::cast(result);
1164 MigrateObject(target->address(), object->address(), object_size); 1164 MigrateObject(target->address(), object->address(), object_size, true);
1165 Heap::UpdateRSet(target);
1166 return true; 1165 return true;
1167 } 1166 }
1168 } else { 1167 } else {
1169 OldSpace* target_space = Heap::TargetSpace(object); 1168 OldSpace* target_space = Heap::TargetSpace(object);
1170 1169
1171 ASSERT(target_space == Heap::old_pointer_space() || 1170 ASSERT(target_space == Heap::old_pointer_space() ||
1172 target_space == Heap::old_data_space()); 1171 target_space == Heap::old_data_space());
1173 result = target_space->AllocateRaw(object_size); 1172 result = target_space->AllocateRaw(object_size);
1174 if (!result->IsFailure()) { 1173 if (!result->IsFailure()) {
1175 HeapObject* target = HeapObject::cast(result); 1174 HeapObject* target = HeapObject::cast(result);
1176 MigrateObject(target->address(), object->address(), object_size); 1175 MigrateObject(target->address(),
1177 if (target_space == Heap::old_pointer_space()) { 1176 object->address(),
1178 Heap::UpdateRSet(target); 1177 object_size,
1179 } 1178 target_space == Heap::old_pointer_space());
1180 return true; 1179 return true;
1181 } 1180 }
1182 } 1181 }
1183 1182
1184 return false; 1183 return false;
1185 } 1184 }
1186 1185
1187 1186
1188 static void SweepNewSpace(NewSpace* space) { 1187 static void SweepNewSpace(NewSpace* space) {
1189 Heap::CheckNewSpaceExpansionCriteria(); 1188 Heap::CheckNewSpaceExpansionCriteria();
(...skipping 19 matching lines...) Expand all
1209 MarkCompactCollector::tracer()->decrement_marked_count(); 1208 MarkCompactCollector::tracer()->decrement_marked_count();
1210 1209
1211 size = object->Size(); 1210 size = object->Size();
1212 survivors_size += size; 1211 survivors_size += size;
1213 1212
1214 // Aggressively promote young survivors to the old space. 1213 // Aggressively promote young survivors to the old space.
1215 if (TryPromoteObject(object, size)) { 1214 if (TryPromoteObject(object, size)) {
1216 continue; 1215 continue;
1217 } 1216 }
1218 1217
1219 // Promotion either failed or not required. 1218 // Promotion failed. Just migrate object to another semispace.
1220 // Copy the content of the object.
1221 Object* target = space->AllocateRaw(size); 1219 Object* target = space->AllocateRaw(size);
1222 1220
1223 // Allocation cannot fail at this point: semispaces are of equal size. 1221 // Allocation cannot fail at this point: semispaces are of equal size.
1224 ASSERT(!target->IsFailure()); 1222 ASSERT(!target->IsFailure());
1225 1223
1226 MigrateObject(HeapObject::cast(target)->address(), current, size); 1224 MigrateObject(HeapObject::cast(target)->address(),
1225 current,
1226 size,
1227 false);
1227 } else { 1228 } else {
1228 size = object->Size(); 1229 size = object->Size();
1229 Memory::Address_at(current) = NULL; 1230 Memory::Address_at(current) = NULL;
1230 } 1231 }
1231 } 1232 }
1232 1233
1233 // Second pass: find pointers to new space and update them. 1234 // Second pass: find pointers to new space and update them.
1234 PointersToNewGenUpdatingVisitor updating_visitor; 1235 PointersToNewGenUpdatingVisitor updating_visitor;
1235 1236
1236 // Update pointers in to space. 1237 // Update pointers in to space.
1237 HeapObject* object; 1238 HeapObject* object;
1238 for (Address current = space->bottom(); 1239 for (Address current = space->bottom();
1239 current < space->top(); 1240 current < space->top();
1240 current += object->Size()) { 1241 current += object->Size()) {
1241 object = HeapObject::FromAddress(current); 1242 object = HeapObject::FromAddress(current);
1242 1243
1243 object->IterateBody(object->map()->instance_type(), 1244 object->IterateBody(object->map()->instance_type(),
1244 object->Size(), 1245 object->Size(),
1245 &updating_visitor); 1246 &updating_visitor);
1246 } 1247 }
1247 1248
1248 // Update roots. 1249 // Update roots.
1249 Heap::IterateRoots(&updating_visitor, VISIT_ALL_IN_SCAVENGE); 1250 Heap::IterateRoots(&updating_visitor, VISIT_ALL_IN_SCAVENGE);
1250 1251
1251 // Update pointers in old spaces. 1252 // Update pointers in old spaces.
1252 Heap::IterateRSet(Heap::old_pointer_space(), &UpdatePointerToNewGen); 1253 Heap::IterateDirtyRegions(Heap::old_pointer_space(),
1253 Heap::IterateRSet(Heap::map_space(), &UpdatePointerToNewGen); 1254 &Heap::IteratePointersInDirtyRegion,
1254 Heap::lo_space()->IterateRSet(&UpdatePointerToNewGen); 1255 &UpdatePointerToNewGen,
1256 Heap::WATERMARK_SHOULD_BE_VALID);
1257
1258 Heap::lo_space()->IterateDirtyRegions(&UpdatePointerToNewGen);
1255 1259
1256 // Update pointers from cells. 1260 // Update pointers from cells.
1257 HeapObjectIterator cell_iterator(Heap::cell_space()); 1261 HeapObjectIterator cell_iterator(Heap::cell_space());
1258 for (HeapObject* cell = cell_iterator.next(); 1262 for (HeapObject* cell = cell_iterator.next();
1259 cell != NULL; 1263 cell != NULL;
1260 cell = cell_iterator.next()) { 1264 cell = cell_iterator.next()) {
1261 if (cell->IsJSGlobalPropertyCell()) { 1265 if (cell->IsJSGlobalPropertyCell()) {
1262 Address value_address = 1266 Address value_address =
1263 reinterpret_cast<Address>(cell) + 1267 reinterpret_cast<Address>(cell) +
1264 (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag); 1268 (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
(...skipping 45 matching lines...) Expand 10 before | Expand all | Expand 10 after
1310 1314
1311 for (Address current = p->ObjectAreaStart(); 1315 for (Address current = p->ObjectAreaStart();
1312 current < p->AllocationTop(); 1316 current < p->AllocationTop();
1313 current += object->Size()) { 1317 current += object->Size()) {
1314 object = HeapObject::FromAddress(current); 1318 object = HeapObject::FromAddress(current);
1315 if (object->IsMarked()) { 1319 if (object->IsMarked()) {
1316 object->ClearMark(); 1320 object->ClearMark();
1317 MarkCompactCollector::tracer()->decrement_marked_count(); 1321 MarkCompactCollector::tracer()->decrement_marked_count();
1318 1322
1319 if (!is_previous_alive) { // Transition from free to live. 1323 if (!is_previous_alive) { // Transition from free to live.
1320 dealloc(free_start, static_cast<int>(current - free_start), true); 1324 dealloc(free_start,
1325 static_cast<int>(current - free_start),
1326 true,
1327 false);
1321 is_previous_alive = true; 1328 is_previous_alive = true;
1322 } 1329 }
1323 } else { 1330 } else {
1324 MarkCompactCollector::ReportDeleteIfNeeded(object); 1331 MarkCompactCollector::ReportDeleteIfNeeded(object);
1325 if (is_previous_alive) { // Transition from live to free. 1332 if (is_previous_alive) { // Transition from live to free.
1326 free_start = current; 1333 free_start = current;
1327 is_previous_alive = false; 1334 is_previous_alive = false;
1328 } 1335 }
1329 } 1336 }
1330 // The object is now unmarked for the call to Size() at the top of the 1337 // The object is now unmarked for the call to Size() at the top of the
1331 // loop. 1338 // loop.
1332 } 1339 }
1333 1340
1334 bool page_is_empty = (p->ObjectAreaStart() == p->AllocationTop()) 1341 bool page_is_empty = (p->ObjectAreaStart() == p->AllocationTop())
1335 || (!is_previous_alive && free_start == p->ObjectAreaStart()); 1342 || (!is_previous_alive && free_start == p->ObjectAreaStart());
1336 1343
1337 if (page_is_empty) { 1344 if (page_is_empty) {
1338 // This page is empty. Check whether we are in the middle of 1345 // This page is empty. Check whether we are in the middle of
1339 // sequence of empty pages and start one if not. 1346 // sequence of empty pages and start one if not.
1340 if (!first_empty_page->is_valid()) { 1347 if (!first_empty_page->is_valid()) {
1341 first_empty_page = p; 1348 first_empty_page = p;
1342 prec_first_empty_page = prev; 1349 prec_first_empty_page = prev;
1343 } 1350 }
1344 1351
1345 if (!is_previous_alive) { 1352 if (!is_previous_alive) {
1346 // There are dead objects on this page. Update space accounting stats 1353 // There are dead objects on this page. Update space accounting stats
1347 // without putting anything into free list. 1354 // without putting anything into free list.
1348 int size_in_bytes = static_cast<int>(p->AllocationTop() - free_start); 1355 int size_in_bytes = static_cast<int>(p->AllocationTop() - free_start);
1349 if (size_in_bytes > 0) { 1356 if (size_in_bytes > 0) {
1350 dealloc(free_start, size_in_bytes, false); 1357 dealloc(free_start, size_in_bytes, false, true);
1358 } else {
1359 #ifdef DEBUG
1360 MemoryAllocator::ZapBlock(p->ObjectAreaStart(),
1361 Page::kObjectAreaSize);
1362 #endif
1351 } 1363 }
1364 } else {
1365 #ifdef DEBUG
1366 MemoryAllocator::ZapBlock(p->ObjectAreaStart(),
1367 Page::kObjectAreaSize);
1368 #endif
1352 } 1369 }
1353 } else { 1370 } else {
1354 // This page is not empty. Sequence of empty pages ended on the previous 1371 // This page is not empty. Sequence of empty pages ended on the previous
1355 // one. 1372 // one.
1356 if (first_empty_page->is_valid()) { 1373 if (first_empty_page->is_valid()) {
1357 space->FreePages(prec_first_empty_page, prev); 1374 space->FreePages(prec_first_empty_page, prev);
1358 prec_first_empty_page = first_empty_page = Page::FromAddress(NULL); 1375 prec_first_empty_page = first_empty_page = Page::FromAddress(NULL);
1359 } 1376 }
1360 1377
1361 // If there is a free ending area on one of the previous pages we 1378 // If there is a free ending area on one of the previous pages we
1362 // deallocate that area and put it on the free list. 1379 // deallocate that area and put it on the free list.
1363 if (last_free_size > 0) { 1380 if (last_free_size > 0) {
1364 dealloc(last_free_start, last_free_size, true); 1381 Page::FromAddress(last_free_start)->
1382 SetAllocationWatermark(last_free_start);
1383 dealloc(last_free_start, last_free_size, true, true);
1365 last_free_start = NULL; 1384 last_free_start = NULL;
1366 last_free_size = 0; 1385 last_free_size = 0;
1367 } 1386 }
1368 1387
1369 // If the last region of this page was not live we remember it. 1388 // If the last region of this page was not live we remember it.
1370 if (!is_previous_alive) { 1389 if (!is_previous_alive) {
1371 ASSERT(last_free_size == 0); 1390 ASSERT(last_free_size == 0);
1372 last_free_size = static_cast<int>(p->AllocationTop() - free_start); 1391 last_free_size = static_cast<int>(p->AllocationTop() - free_start);
1373 last_free_start = free_start; 1392 last_free_start = free_start;
1374 } 1393 }
(...skipping 10 matching lines...) Expand all
1385 // to the beginning of first empty page. 1404 // to the beginning of first empty page.
1386 ASSERT(prev == space->AllocationTopPage()); 1405 ASSERT(prev == space->AllocationTopPage());
1387 1406
1388 new_allocation_top = first_empty_page->ObjectAreaStart(); 1407 new_allocation_top = first_empty_page->ObjectAreaStart();
1389 } 1408 }
1390 1409
1391 if (last_free_size > 0) { 1410 if (last_free_size > 0) {
1392 // There was a free ending area on the previous page. 1411 // There was a free ending area on the previous page.
1393 // Deallocate it without putting it into freelist and move allocation 1412 // Deallocate it without putting it into freelist and move allocation
1394 // top to the beginning of this free area. 1413 // top to the beginning of this free area.
1395 dealloc(last_free_start, last_free_size, false); 1414 dealloc(last_free_start, last_free_size, false, true);
1396 new_allocation_top = last_free_start; 1415 new_allocation_top = last_free_start;
1397 } 1416 }
1398 1417
1399 if (new_allocation_top != NULL) { 1418 if (new_allocation_top != NULL) {
1400 #ifdef DEBUG 1419 #ifdef DEBUG
1401 Page* new_allocation_top_page = Page::FromAllocationTop(new_allocation_top); 1420 Page* new_allocation_top_page = Page::FromAllocationTop(new_allocation_top);
1402 if (!first_empty_page->is_valid()) { 1421 if (!first_empty_page->is_valid()) {
1403 ASSERT(new_allocation_top_page == space->AllocationTopPage()); 1422 ASSERT(new_allocation_top_page == space->AllocationTopPage());
1404 } else if (last_free_size > 0) { 1423 } else if (last_free_size > 0) {
1405 ASSERT(new_allocation_top_page == prec_first_empty_page); 1424 ASSERT(new_allocation_top_page == prec_first_empty_page);
1406 } else { 1425 } else {
1407 ASSERT(new_allocation_top_page == first_empty_page); 1426 ASSERT(new_allocation_top_page == first_empty_page);
1408 } 1427 }
1409 #endif 1428 #endif
1410 1429
1411 space->SetTop(new_allocation_top); 1430 space->SetTop(new_allocation_top);
1412 } 1431 }
1413 } 1432 }
1414 1433
1415 1434
1416 void MarkCompactCollector::DeallocateOldPointerBlock(Address start, 1435 void MarkCompactCollector::DeallocateOldPointerBlock(Address start,
1417 int size_in_bytes, 1436 int size_in_bytes,
1418 bool add_to_freelist) { 1437 bool add_to_freelist,
1419 Heap::ClearRSetRange(start, size_in_bytes); 1438 bool last_on_page) {
1420 Heap::old_pointer_space()->Free(start, size_in_bytes, add_to_freelist); 1439 Heap::old_pointer_space()->Free(start, size_in_bytes, add_to_freelist);
1421 } 1440 }
1422 1441
1423 1442
1424 void MarkCompactCollector::DeallocateOldDataBlock(Address start, 1443 void MarkCompactCollector::DeallocateOldDataBlock(Address start,
1425 int size_in_bytes, 1444 int size_in_bytes,
1426 bool add_to_freelist) { 1445 bool add_to_freelist,
1446 bool last_on_page) {
1427 Heap::old_data_space()->Free(start, size_in_bytes, add_to_freelist); 1447 Heap::old_data_space()->Free(start, size_in_bytes, add_to_freelist);
1428 } 1448 }
1429 1449
1430 1450
1431 void MarkCompactCollector::DeallocateCodeBlock(Address start, 1451 void MarkCompactCollector::DeallocateCodeBlock(Address start,
1432 int size_in_bytes, 1452 int size_in_bytes,
1433 bool add_to_freelist) { 1453 bool add_to_freelist,
1454 bool last_on_page) {
1434 Heap::code_space()->Free(start, size_in_bytes, add_to_freelist); 1455 Heap::code_space()->Free(start, size_in_bytes, add_to_freelist);
1435 } 1456 }
1436 1457
1437 1458
1438 void MarkCompactCollector::DeallocateMapBlock(Address start, 1459 void MarkCompactCollector::DeallocateMapBlock(Address start,
1439 int size_in_bytes, 1460 int size_in_bytes,
1440 bool add_to_freelist) { 1461 bool add_to_freelist,
1462 bool last_on_page) {
1441 // Objects in map space are assumed to have size Map::kSize and a 1463 // Objects in map space are assumed to have size Map::kSize and a
1442 // valid map in their first word. Thus, we break the free block up into 1464 // valid map in their first word. Thus, we break the free block up into
1443 // chunks and free them separately. 1465 // chunks and free them separately.
1444 ASSERT(size_in_bytes % Map::kSize == 0); 1466 ASSERT(size_in_bytes % Map::kSize == 0);
1445 Heap::ClearRSetRange(start, size_in_bytes);
1446 Address end = start + size_in_bytes; 1467 Address end = start + size_in_bytes;
1447 for (Address a = start; a < end; a += Map::kSize) { 1468 for (Address a = start; a < end; a += Map::kSize) {
1448 Heap::map_space()->Free(a, add_to_freelist); 1469 Heap::map_space()->Free(a, add_to_freelist);
1449 } 1470 }
1450 } 1471 }
1451 1472
1452 1473
1453 void MarkCompactCollector::DeallocateCellBlock(Address start, 1474 void MarkCompactCollector::DeallocateCellBlock(Address start,
1454 int size_in_bytes, 1475 int size_in_bytes,
1455 bool add_to_freelist) { 1476 bool add_to_freelist,
1477 bool last_on_page) {
1456 // Free-list elements in cell space are assumed to have a fixed size. 1478 // Free-list elements in cell space are assumed to have a fixed size.
1457 // We break the free block into chunks and add them to the free list 1479 // We break the free block into chunks and add them to the free list
1458 // individually. 1480 // individually.
1459 int size = Heap::cell_space()->object_size_in_bytes(); 1481 int size = Heap::cell_space()->object_size_in_bytes();
1460 ASSERT(size_in_bytes % size == 0); 1482 ASSERT(size_in_bytes % size == 0);
1461 Heap::ClearRSetRange(start, size_in_bytes);
1462 Address end = start + size_in_bytes; 1483 Address end = start + size_in_bytes;
1463 for (Address a = start; a < end; a += size) { 1484 for (Address a = start; a < end; a += size) {
1464 Heap::cell_space()->Free(a, add_to_freelist); 1485 Heap::cell_space()->Free(a, add_to_freelist);
1465 } 1486 }
1466 } 1487 }
1467 1488
1468 1489
1469 void MarkCompactCollector::EncodeForwardingAddresses() { 1490 void MarkCompactCollector::EncodeForwardingAddresses() {
1470 ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES); 1491 ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
1471 // Objects in the active semispace of the young generation may be 1492 // Objects in the active semispace of the young generation may be
(...skipping 78 matching lines...) Expand 10 before | Expand all | Expand 10 after
1550 #ifdef DEBUG 1571 #ifdef DEBUG
1551 CheckNoMapsToEvacuate(); 1572 CheckNoMapsToEvacuate();
1552 #endif 1573 #endif
1553 } 1574 }
1554 1575
1555 void UpdateMapPointersInRoots() { 1576 void UpdateMapPointersInRoots() {
1556 Heap::IterateRoots(&map_updating_visitor_, VISIT_ONLY_STRONG); 1577 Heap::IterateRoots(&map_updating_visitor_, VISIT_ONLY_STRONG);
1557 GlobalHandles::IterateWeakRoots(&map_updating_visitor_); 1578 GlobalHandles::IterateWeakRoots(&map_updating_visitor_);
1558 } 1579 }
1559 1580
1560 void FinishMapSpace() {
1561 // Iterate through to space and finish move.
1562 MapIterator it;
1563 HeapObject* o = it.next();
1564 for (; o != first_map_to_evacuate_; o = it.next()) {
1565 ASSERT(o != NULL);
1566 Map* map = reinterpret_cast<Map*>(o);
1567 ASSERT(!map->IsMarked());
1568 ASSERT(!map->IsOverflowed());
1569 ASSERT(map->IsMap());
1570 Heap::UpdateRSet(map);
1571 }
1572 }
1573
1574 void UpdateMapPointersInPagedSpace(PagedSpace* space) { 1581 void UpdateMapPointersInPagedSpace(PagedSpace* space) {
1575 ASSERT(space != Heap::map_space()); 1582 ASSERT(space != Heap::map_space());
1576 1583
1577 PageIterator it(space, PageIterator::PAGES_IN_USE); 1584 PageIterator it(space, PageIterator::PAGES_IN_USE);
1578 while (it.has_next()) { 1585 while (it.has_next()) {
1579 Page* p = it.next(); 1586 Page* p = it.next();
1580 UpdateMapPointersInRange(p->ObjectAreaStart(), p->AllocationTop()); 1587 UpdateMapPointersInRange(p->ObjectAreaStart(), p->AllocationTop());
1581 } 1588 }
1582 } 1589 }
1583 1590
(...skipping 72 matching lines...) Expand 10 before | Expand all | Expand 10 after
1656 ASSERT(map->IsMap()); 1663 ASSERT(map->IsMap());
1657 return map; 1664 return map;
1658 } 1665 }
1659 1666
1660 static void EvacuateMap(Map* vacant_map, Map* map_to_evacuate) { 1667 static void EvacuateMap(Map* vacant_map, Map* map_to_evacuate) {
1661 ASSERT(FreeListNode::IsFreeListNode(vacant_map)); 1668 ASSERT(FreeListNode::IsFreeListNode(vacant_map));
1662 ASSERT(map_to_evacuate->IsMap()); 1669 ASSERT(map_to_evacuate->IsMap());
1663 1670
1664 ASSERT(Map::kSize % 4 == 0); 1671 ASSERT(Map::kSize % 4 == 0);
1665 1672
1666 Heap::CopyBlock(reinterpret_cast<Object**>(vacant_map->address()), 1673 Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(vacant_map->address(),
1667 reinterpret_cast<Object**>(map_to_evacuate->address()), 1674 map_to_evacuate->address(),
1668 Map::kSize); 1675 Map::kSize);
1669 1676
1670 ASSERT(vacant_map->IsMap()); // Due to memcpy above. 1677 ASSERT(vacant_map->IsMap()); // Due to memcpy above.
1671 1678
1672 MapWord forwarding_map_word = MapWord::FromMap(vacant_map); 1679 MapWord forwarding_map_word = MapWord::FromMap(vacant_map);
1673 forwarding_map_word.SetOverflow(); 1680 forwarding_map_word.SetOverflow();
1674 map_to_evacuate->set_map_word(forwarding_map_word); 1681 map_to_evacuate->set_map_word(forwarding_map_word);
1675 1682
1676 ASSERT(map_to_evacuate->map_word().IsOverflowed()); 1683 ASSERT(map_to_evacuate->map_word().IsOverflowed());
1677 ASSERT(GetForwardedMap(map_to_evacuate->map_word()) == vacant_map); 1684 ASSERT(GetForwardedMap(map_to_evacuate->map_word()) == vacant_map);
1678 } 1685 }
(...skipping 62 matching lines...) Expand 10 before | Expand all | Expand 10 after
1741 // bits and free the nonlive blocks (for old and map spaces). We sweep 1748 // bits and free the nonlive blocks (for old and map spaces). We sweep
1742 // the map space last because freeing non-live maps overwrites them and 1749 // the map space last because freeing non-live maps overwrites them and
1743 // the other spaces rely on possibly non-live maps to get the sizes for 1750 // the other spaces rely on possibly non-live maps to get the sizes for
1744 // non-live objects. 1751 // non-live objects.
1745 SweepSpace(Heap::old_pointer_space(), &DeallocateOldPointerBlock); 1752 SweepSpace(Heap::old_pointer_space(), &DeallocateOldPointerBlock);
1746 SweepSpace(Heap::old_data_space(), &DeallocateOldDataBlock); 1753 SweepSpace(Heap::old_data_space(), &DeallocateOldDataBlock);
1747 SweepSpace(Heap::code_space(), &DeallocateCodeBlock); 1754 SweepSpace(Heap::code_space(), &DeallocateCodeBlock);
1748 SweepSpace(Heap::cell_space(), &DeallocateCellBlock); 1755 SweepSpace(Heap::cell_space(), &DeallocateCellBlock);
1749 SweepNewSpace(Heap::new_space()); 1756 SweepNewSpace(Heap::new_space());
1750 SweepSpace(Heap::map_space(), &DeallocateMapBlock); 1757 SweepSpace(Heap::map_space(), &DeallocateMapBlock);
1758
1759 Heap::IterateDirtyRegions(Heap::map_space(),
1760 &Heap::IteratePointersInDirtyMapsRegion,
1761 &UpdatePointerToNewGen,
1762 Heap::WATERMARK_SHOULD_BE_VALID);
1763
1751 int live_maps_size = Heap::map_space()->Size(); 1764 int live_maps_size = Heap::map_space()->Size();
1752 int live_maps = live_maps_size / Map::kSize; 1765 int live_maps = live_maps_size / Map::kSize;
1753 ASSERT(live_map_objects_size_ == live_maps_size); 1766 ASSERT(live_map_objects_size_ == live_maps_size);
1754 1767
1755 if (Heap::map_space()->NeedsCompaction(live_maps)) { 1768 if (Heap::map_space()->NeedsCompaction(live_maps)) {
1756 MapCompact map_compact(live_maps); 1769 MapCompact map_compact(live_maps);
1757 1770
1758 map_compact.CompactMaps(); 1771 map_compact.CompactMaps();
1759 map_compact.UpdateMapPointersInRoots(); 1772 map_compact.UpdateMapPointersInRoots();
1760 1773
1761 map_compact.FinishMapSpace();
1762 PagedSpaces spaces; 1774 PagedSpaces spaces;
1763 for (PagedSpace* space = spaces.next(); 1775 for (PagedSpace* space = spaces.next();
1764 space != NULL; space = spaces.next()) { 1776 space != NULL; space = spaces.next()) {
1765 if (space == Heap::map_space()) continue; 1777 if (space == Heap::map_space()) continue;
1766 map_compact.UpdateMapPointersInPagedSpace(space); 1778 map_compact.UpdateMapPointersInPagedSpace(space);
1767 } 1779 }
1768 map_compact.UpdateMapPointersInNewSpace(); 1780 map_compact.UpdateMapPointersInNewSpace();
1769 map_compact.UpdateMapPointersInLargeObjectSpace(); 1781 map_compact.UpdateMapPointersInLargeObjectSpace();
1770 1782
1771 map_compact.Finish(); 1783 map_compact.Finish();
(...skipping 252 matching lines...) Expand 10 before | Expand all | Expand 10 after
2024 Address obj_addr = obj->address(); 2036 Address obj_addr = obj->address();
2025 2037
2026 // Find the first live object's forwarding address. 2038 // Find the first live object's forwarding address.
2027 Page* p = Page::FromAddress(obj_addr); 2039 Page* p = Page::FromAddress(obj_addr);
2028 Address first_forwarded = p->mc_first_forwarded; 2040 Address first_forwarded = p->mc_first_forwarded;
2029 2041
2030 // Page start address of forwarded address. 2042 // Page start address of forwarded address.
2031 Page* forwarded_page = Page::FromAddress(first_forwarded); 2043 Page* forwarded_page = Page::FromAddress(first_forwarded);
2032 int forwarded_offset = forwarded_page->Offset(first_forwarded); 2044 int forwarded_offset = forwarded_page->Offset(first_forwarded);
2033 2045
2034 // Find end of allocation of in the page of first_forwarded. 2046 // Find end of allocation in the page of first_forwarded.
2035 Address mc_top = forwarded_page->mc_relocation_top; 2047 int mc_top_offset = forwarded_page->AllocationWatermarkOffset();
2036 int mc_top_offset = forwarded_page->Offset(mc_top);
2037 2048
2038 // Check if current object's forward pointer is in the same page 2049 // Check if current object's forward pointer is in the same page
2039 // as the first live object's forwarding pointer 2050 // as the first live object's forwarding pointer
2040 if (forwarded_offset + offset < mc_top_offset) { 2051 if (forwarded_offset + offset < mc_top_offset) {
2041 // In the same page. 2052 // In the same page.
2042 return first_forwarded + offset; 2053 return first_forwarded + offset;
2043 } 2054 }
2044 2055
2045 // Must be in the next page, NOTE: this may cross chunks. 2056 // Must be in the next page, NOTE: this may cross chunks.
2046 Page* next_page = forwarded_page->next_page(); 2057 Page* next_page = forwarded_page->next_page();
2047 ASSERT(next_page->is_valid()); 2058 ASSERT(next_page->is_valid());
2048 2059
2049 offset -= (mc_top_offset - forwarded_offset); 2060 offset -= (mc_top_offset - forwarded_offset);
2050 offset += Page::kObjectStartOffset; 2061 offset += Page::kObjectStartOffset;
2051 2062
2052 ASSERT_PAGE_OFFSET(offset); 2063 ASSERT_PAGE_OFFSET(offset);
2053 ASSERT(next_page->OffsetToAddress(offset) < next_page->mc_relocation_top); 2064 ASSERT(next_page->OffsetToAddress(offset) < next_page->AllocationTop());
2054 2065
2055 return next_page->OffsetToAddress(offset); 2066 return next_page->OffsetToAddress(offset);
2056 } 2067 }
2057 2068
2058 2069
2059 // ------------------------------------------------------------------------- 2070 // -------------------------------------------------------------------------
2060 // Phase 4: Relocate objects 2071 // Phase 4: Relocate objects
2061 2072
2062 void MarkCompactCollector::RelocateObjects() { 2073 void MarkCompactCollector::RelocateObjects() {
2063 #ifdef DEBUG 2074 #ifdef DEBUG
(...skipping 24 matching lines...) Expand all
2088 ASSERT(live_maps_size == live_map_objects_size_); 2099 ASSERT(live_maps_size == live_map_objects_size_);
2089 ASSERT(live_data_olds_size == live_old_data_objects_size_); 2100 ASSERT(live_data_olds_size == live_old_data_objects_size_);
2090 ASSERT(live_pointer_olds_size == live_old_pointer_objects_size_); 2101 ASSERT(live_pointer_olds_size == live_old_pointer_objects_size_);
2091 ASSERT(live_codes_size == live_code_objects_size_); 2102 ASSERT(live_codes_size == live_code_objects_size_);
2092 ASSERT(live_cells_size == live_cell_objects_size_); 2103 ASSERT(live_cells_size == live_cell_objects_size_);
2093 ASSERT(live_news_size == live_young_objects_size_); 2104 ASSERT(live_news_size == live_young_objects_size_);
2094 2105
2095 // Flip from and to spaces 2106 // Flip from and to spaces
2096 Heap::new_space()->Flip(); 2107 Heap::new_space()->Flip();
2097 2108
2109 Heap::new_space()->MCCommitRelocationInfo();
2110
2098 // Set age_mark to bottom in to space 2111 // Set age_mark to bottom in to space
2099 Address mark = Heap::new_space()->bottom(); 2112 Address mark = Heap::new_space()->bottom();
2100 Heap::new_space()->set_age_mark(mark); 2113 Heap::new_space()->set_age_mark(mark);
2101 2114
2102 Heap::new_space()->MCCommitRelocationInfo();
2103 #ifdef DEBUG
2104 // It is safe to write to the remembered sets as remembered sets on a
2105 // page-by-page basis after committing the m-c forwarding pointer.
2106 Page::set_rset_state(Page::IN_USE);
2107 #endif
2108 PagedSpaces spaces; 2115 PagedSpaces spaces;
2109 for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next()) 2116 for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
2110 space->MCCommitRelocationInfo(); 2117 space->MCCommitRelocationInfo();
2111 2118
2112 Heap::CheckNewSpaceExpansionCriteria(); 2119 Heap::CheckNewSpaceExpansionCriteria();
2113 Heap::IncrementYoungSurvivorsCounter(live_news_size); 2120 Heap::IncrementYoungSurvivorsCounter(live_news_size);
2114 } 2121 }
2115 2122
2116 2123
2117 int MarkCompactCollector::RelocateMapObject(HeapObject* obj) { 2124 int MarkCompactCollector::RelocateMapObject(HeapObject* obj) {
2118 // Recover map pointer. 2125 // Recover map pointer.
2119 MapWord encoding = obj->map_word(); 2126 MapWord encoding = obj->map_word();
2120 Address map_addr = encoding.DecodeMapAddress(Heap::map_space()); 2127 Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
2121 ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr))); 2128 ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr)));
2122 2129
2123 // Get forwarding address before resetting map pointer 2130 // Get forwarding address before resetting map pointer
2124 Address new_addr = GetForwardingAddressInOldSpace(obj); 2131 Address new_addr = GetForwardingAddressInOldSpace(obj);
2125 2132
2126 // Reset map pointer. The meta map object may not be copied yet so 2133 // Reset map pointer. The meta map object may not be copied yet so
2127 // Map::cast does not yet work. 2134 // Map::cast does not yet work.
2128 obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr))); 2135 obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr)));
2129 2136
2130 Address old_addr = obj->address(); 2137 Address old_addr = obj->address();
2131 2138
2132 if (new_addr != old_addr) { 2139 if (new_addr != old_addr) {
2133 // Move contents. 2140 // Move contents.
2134 Heap::MoveBlock(reinterpret_cast<Object**>(new_addr), 2141 Heap::MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
2135 reinterpret_cast<Object**>(old_addr), 2142 old_addr,
2136 Map::kSize); 2143 Map::kSize);
2137 } 2144 }
2138 2145
2139 #ifdef DEBUG 2146 #ifdef DEBUG
2140 if (FLAG_gc_verbose) { 2147 if (FLAG_gc_verbose) {
2141 PrintF("relocate %p -> %p\n", old_addr, new_addr); 2148 PrintF("relocate %p -> %p\n", old_addr, new_addr);
2142 } 2149 }
2143 #endif 2150 #endif
2144 2151
2145 return Map::kSize; 2152 return Map::kSize;
2146 } 2153 }
(...skipping 36 matching lines...) Expand 10 before | Expand all | Expand 10 after
2183 // Get forwarding address before resetting map pointer. 2190 // Get forwarding address before resetting map pointer.
2184 Address new_addr = GetForwardingAddressInOldSpace(obj); 2191 Address new_addr = GetForwardingAddressInOldSpace(obj);
2185 2192
2186 // Reset the map pointer. 2193 // Reset the map pointer.
2187 int obj_size = RestoreMap(obj, space, new_addr, map_addr); 2194 int obj_size = RestoreMap(obj, space, new_addr, map_addr);
2188 2195
2189 Address old_addr = obj->address(); 2196 Address old_addr = obj->address();
2190 2197
2191 if (new_addr != old_addr) { 2198 if (new_addr != old_addr) {
2192 // Move contents. 2199 // Move contents.
2193 Heap::MoveBlock(reinterpret_cast<Object**>(new_addr), 2200 if (space == Heap::old_data_space()) {
2194 reinterpret_cast<Object**>(old_addr), 2201 Heap::MoveBlock(new_addr, old_addr, obj_size);
2195 obj_size); 2202 } else {
2203 Heap::MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
2204 old_addr,
2205 obj_size);
2206 }
2196 } 2207 }
2197 2208
2198 ASSERT(!HeapObject::FromAddress(new_addr)->IsCode()); 2209 ASSERT(!HeapObject::FromAddress(new_addr)->IsCode());
2199 2210
2200 HeapObject* copied_to = HeapObject::FromAddress(new_addr); 2211 HeapObject* copied_to = HeapObject::FromAddress(new_addr);
2201 if (copied_to->IsJSFunction()) { 2212 if (copied_to->IsJSFunction()) {
2202 PROFILE(FunctionMoveEvent(old_addr, new_addr)); 2213 PROFILE(FunctionMoveEvent(old_addr, new_addr));
2203 } 2214 }
2204 2215
2205 return obj_size; 2216 return obj_size;
(...skipping 24 matching lines...) Expand all
2230 // Get forwarding address before resetting map pointer 2241 // Get forwarding address before resetting map pointer
2231 Address new_addr = GetForwardingAddressInOldSpace(obj); 2242 Address new_addr = GetForwardingAddressInOldSpace(obj);
2232 2243
2233 // Reset the map pointer. 2244 // Reset the map pointer.
2234 int obj_size = RestoreMap(obj, Heap::code_space(), new_addr, map_addr); 2245 int obj_size = RestoreMap(obj, Heap::code_space(), new_addr, map_addr);
2235 2246
2236 Address old_addr = obj->address(); 2247 Address old_addr = obj->address();
2237 2248
2238 if (new_addr != old_addr) { 2249 if (new_addr != old_addr) {
2239 // Move contents. 2250 // Move contents.
2240 Heap::MoveBlock(reinterpret_cast<Object**>(new_addr), 2251 Heap::MoveBlock(new_addr, old_addr, obj_size);
2241 reinterpret_cast<Object**>(old_addr),
2242 obj_size);
2243 } 2252 }
2244 2253
2245 HeapObject* copied_to = HeapObject::FromAddress(new_addr); 2254 HeapObject* copied_to = HeapObject::FromAddress(new_addr);
2246 if (copied_to->IsCode()) { 2255 if (copied_to->IsCode()) {
2247 // May also update inline cache target. 2256 // May also update inline cache target.
2248 Code::cast(copied_to)->Relocate(new_addr - old_addr); 2257 Code::cast(copied_to)->Relocate(new_addr - old_addr);
2249 // Notify the logger that compiled code has moved. 2258 // Notify the logger that compiled code has moved.
2250 PROFILE(CodeMoveEvent(old_addr, new_addr)); 2259 PROFILE(CodeMoveEvent(old_addr, new_addr));
2251 } 2260 }
2252 2261
(...skipping 15 matching lines...) Expand all
2268 if (Heap::new_space()->FromSpaceContains(new_addr)) { 2277 if (Heap::new_space()->FromSpaceContains(new_addr)) {
2269 ASSERT(Heap::new_space()->FromSpaceOffsetForAddress(new_addr) <= 2278 ASSERT(Heap::new_space()->FromSpaceOffsetForAddress(new_addr) <=
2270 Heap::new_space()->ToSpaceOffsetForAddress(old_addr)); 2279 Heap::new_space()->ToSpaceOffsetForAddress(old_addr));
2271 } else { 2280 } else {
2272 ASSERT(Heap::TargetSpace(obj) == Heap::old_pointer_space() || 2281 ASSERT(Heap::TargetSpace(obj) == Heap::old_pointer_space() ||
2273 Heap::TargetSpace(obj) == Heap::old_data_space()); 2282 Heap::TargetSpace(obj) == Heap::old_data_space());
2274 } 2283 }
2275 #endif 2284 #endif
2276 2285
2277 // New and old addresses cannot overlap. 2286 // New and old addresses cannot overlap.
2278 Heap::CopyBlock(reinterpret_cast<Object**>(new_addr), 2287 if (Heap::InNewSpace(HeapObject::FromAddress(new_addr))) {
2279 reinterpret_cast<Object**>(old_addr), 2288 Heap::CopyBlock(new_addr, old_addr, obj_size);
2280 obj_size); 2289 } else {
2290 Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(new_addr,
2291 old_addr,
2292 obj_size);
2293 }
2281 2294
2282 #ifdef DEBUG 2295 #ifdef DEBUG
2283 if (FLAG_gc_verbose) { 2296 if (FLAG_gc_verbose) {
2284 PrintF("relocate %p -> %p\n", old_addr, new_addr); 2297 PrintF("relocate %p -> %p\n", old_addr, new_addr);
2285 } 2298 }
2286 #endif 2299 #endif
2287 2300
2288 HeapObject* copied_to = HeapObject::FromAddress(new_addr); 2301 HeapObject* copied_to = HeapObject::FromAddress(new_addr);
2289 if (copied_to->IsJSFunction()) { 2302 if (copied_to->IsJSFunction()) {
2290 PROFILE(FunctionMoveEvent(old_addr, new_addr)); 2303 PROFILE(FunctionMoveEvent(old_addr, new_addr));
2291 } 2304 }
2292 2305
2293 return obj_size; 2306 return obj_size;
2294 } 2307 }
2295 2308
2296 2309
2297 // -------------------------------------------------------------------------
2298 // Phase 5: rebuild remembered sets
2299
2300 void MarkCompactCollector::RebuildRSets() {
2301 #ifdef DEBUG
2302 ASSERT(state_ == RELOCATE_OBJECTS);
2303 state_ = REBUILD_RSETS;
2304 #endif
2305 Heap::RebuildRSets();
2306 }
2307
2308
2309 void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj) { 2310 void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj) {
2310 #ifdef ENABLE_LOGGING_AND_PROFILING 2311 #ifdef ENABLE_LOGGING_AND_PROFILING
2311 if (obj->IsCode()) { 2312 if (obj->IsCode()) {
2312 PROFILE(CodeDeleteEvent(obj->address())); 2313 PROFILE(CodeDeleteEvent(obj->address()));
2313 } else if (obj->IsJSFunction()) { 2314 } else if (obj->IsJSFunction()) {
2314 PROFILE(FunctionDeleteEvent(obj->address())); 2315 PROFILE(FunctionDeleteEvent(obj->address()));
2315 } 2316 }
2316 #endif 2317 #endif
2317 } 2318 }
2318 2319
2319 } } // namespace v8::internal 2320 } } // namespace v8::internal
OLDNEW
« no previous file with comments | « src/mark-compact.h ('k') | src/objects.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698