Chromium Code Reviews

Unified Diff: src/mark-compact.cc

Issue 555072: Merge ObjectIterator::has_next and ObjectIterator::next methods.... (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: '' Created 10 years, 11 months ago
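Note on the pattern applied throughout this file: the CL folds the two-call iterator protocol (has_next() followed by next()) into a single next() that returns NULL once the space is exhausted. A minimal before/after sketch, assuming an ObjectIterator-style iterator `it` and a placeholder Process() helper (neither the helper nor this exact call site appears in the patch):

  // Old protocol: has_next() must be called before each next(); it also had
  // side effects needed to advance the iterator (see bug 586).
  while (it.has_next()) {
    HeapObject* obj = it.next();
    Process(obj);  // Process() is a hypothetical placeholder.
  }

  // New protocol: a single method; NULL signals that the space is exhausted.
  for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
    Process(obj);  // Same hypothetical placeholder.
  }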
 // Copyright 2006-2008 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 111 matching lines...)
 
 #ifdef DEBUG
   if (compacting_collection_) {
     // We will write bookkeeping information to the remembered set area
     // starting now.
     Page::set_rset_state(Page::NOT_IN_USE);
   }
 #endif
 
   PagedSpaces spaces;
-  while (PagedSpace* space = spaces.next()) {
+  for (PagedSpace* space = spaces.next();
+       space != NULL; space = spaces.next()) {
     space->PrepareForMarkCompact(compacting_collection_);
   }
 
 #ifdef DEBUG
   live_bytes_ = 0;
   live_young_objects_ = 0;
   live_old_pointer_objects_ = 0;
   live_old_data_objects_ = 0;
   live_code_objects_ = 0;
   live_map_objects_ = 0;
(...skipping 22 matching lines...)
 
   // We compact the old generation on the next GC if it has gotten too
   // fragmented (ie, we could recover an expected amount of space by
   // reclaiming the waste and free list blocks).
   static const int kFragmentationLimit = 15;  // Percent.
   static const int kFragmentationAllowed = 1 * MB;  // Absolute.
   int old_gen_recoverable = 0;
   int old_gen_used = 0;
 
   OldSpaces spaces;
-  while (OldSpace* space = spaces.next()) {
+  for (OldSpace* space = spaces.next(); space != NULL; space = spaces.next()) {
     old_gen_recoverable += space->Waste() + space->AvailableFree();
     old_gen_used += space->Size();
   }
 
   int old_gen_fragmentation =
       static_cast<int>((old_gen_recoverable * 100.0) / old_gen_used);
   if (old_gen_fragmentation > kFragmentationLimit &&
       old_gen_recoverable > kFragmentationAllowed) {
     compact_on_next_gc_ = true;
   }
(...skipping 282 matching lines...)
     }
   }
   // The DescriptorArray descriptors contains a pointer to its contents array,
   // but the contents array is already marked.
   marking_stack.Push(descriptors);
 }
 
 
 void MarkCompactCollector::CreateBackPointers() {
   HeapObjectIterator iterator(Heap::map_space());
-  while (iterator.has_next()) {
-    Object* next_object = iterator.next();
+  for (HeapObject* next_object = iterator.next();
+       next_object != NULL; next_object = iterator.next()) {
     if (next_object->IsMap()) {  // Could also be ByteArray on free list.
       Map* map = Map::cast(next_object);
       if (map->instance_type() >= FIRST_JS_OBJECT_TYPE &&
           map->instance_type() <= JS_FUNCTION_TYPE) {
         map->CreateBackPointers();
       } else {
         ASSERT(map->instance_descriptors() == Heap::empty_descriptor_array());
       }
     }
   }
(...skipping 12 matching lines...)
 
 // Fill the marking stack with overflowed objects returned by the given
 // iterator. Stop when the marking stack is filled or the end of the space
 // is reached, whichever comes first.
 template<class T>
 static void ScanOverflowedObjects(T* it) {
   // The caller should ensure that the marking stack is initially not full,
   // so that we don't waste effort pointlessly scanning for objects.
   ASSERT(!marking_stack.is_full());
 
-  while (it->has_next()) {
-    HeapObject* object = it->next();
+  for (HeapObject* object = it->next(); object != NULL; object = it->next()) {
     if (object->IsOverflowed()) {
       object->ClearOverflow();
       ASSERT(object->IsMarked());
       ASSERT(Heap::Contains(object));
       marking_stack.Push(object);
       if (marking_stack.is_full()) return;
     }
   }
 }
 
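For reference, a sketch of how such a template might be instantiated when the marking stack has overflowed. The iterator constructors match ones visible elsewhere in this diff, but this exact refill sequence is an assumption, not the patch's code:

  // Hypothetical refill sequence: scan one space at a time, bailing out as
  // soon as the marking stack fills up again.
  HeapObjectIterator map_it(Heap::map_space());
  ScanOverflowedObjects(&map_it);
  if (marking_stack.is_full()) return;

  LargeObjectIterator lo_it(Heap::lo_space());
  ScanOverflowedObjects(&lo_it);
  if (marking_stack.is_full()) return;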
(...skipping 262 matching lines...)
   // Iterate over the map space, setting map transitions that go from
   // a marked map to an unmarked map to null transitions. At the same time,
   // set all the prototype fields of maps back to their original value,
   // dropping the back pointers temporarily stored in the prototype field.
   // Setting the prototype field requires following the linked list of
   // back pointers, reversing them all at once. This allows us to find
   // those maps with map transitions that need to be nulled, and only
   // scan the descriptor arrays of those maps, not all maps.
   // All of these actions are carried out only on maps of JSObjects
   // and related subtypes.
-  while (map_iterator.has_next()) {
-    Map* map = reinterpret_cast<Map*>(map_iterator.next());
+  for (HeapObject* obj = map_iterator.next();
+       obj != NULL; obj = map_iterator.next()) {
+    Map* map = reinterpret_cast<Map*>(obj);
     if (!map->IsMarked() && map->IsByteArray()) continue;
 
     ASSERT(SafeIsMap(map));
     // Only JSObject and subtypes have map transitions and back pointers.
     if (map->instance_type() < FIRST_JS_OBJECT_TYPE) continue;
     if (map->instance_type() > JS_FUNCTION_TYPE) continue;
     // Follow the chain of back pointers to find the prototype.
     Map* current = map;
     while (SafeIsMap(current)) {
       current = reinterpret_cast<Map*>(current->prototype());
(...skipping 467 matching lines...)
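The loop body above is cut off by the elided lines. Purely as a reading aid, here is a sketch of the walk that the preceding comment describes, reconstructed from that comment and the visible lines only (the hidden code may differ):

  // Follow back pointers (temporarily stored in the prototype field) until
  // a non-map value is reached; that value is the original prototype.
  Map* current = map;
  while (SafeIsMap(current)) {
    current = reinterpret_cast<Map*>(current->prototype());
  }
  Object* real_prototype = current;  // Hypothetical local, not in the diff.
  // Per the comment, the elided code then re-walks the chain, restoring each
  // map's prototype field and nulling transitions that lead from marked maps
  // to unmarked maps.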
   void UpdateMapPointersInRoots() {
     Heap::IterateRoots(&map_updating_visitor_, VISIT_ONLY_STRONG);
     GlobalHandles::IterateWeakRoots(&map_updating_visitor_);
   }
 
   void FinishMapSpace() {
     // Iterate through to space and finish move.
     MapIterator it;
     HeapObject* o = it.next();
     for (; o != first_map_to_evacuate_; o = it.next()) {
-      it.has_next();  // Must be called for side-effects, see bug 586.
-      ASSERT(it.has_next());
+      ASSERT(o != NULL);
       Map* map = reinterpret_cast<Map*>(o);
       ASSERT(!map->IsMarked());
       ASSERT(!map->IsOverflowed());
       ASSERT(map->IsMap());
       Heap::UpdateRSet(map);
     }
   }
 
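The two lines deleted from FinishMapSpace above are the bug 586 workaround this CL retires: has_next() apparently carried side effects needed to advance the iterator, so even release builds had to call it before next() was safe, while only the ASSERT consumed its result. With exhaustion encoded in the NULL return of next(), the check collapses to a plain assert on the value already in hand, i.e. ASSERT(o != NULL).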
   void UpdateMapPointersInPagedSpace(PagedSpace* space) {
     ASSERT(space != Heap::map_space());
 
     PageIterator it(space, PageIterator::PAGES_IN_USE);
     while (it.has_next()) {
       Page* p = it.next();
       UpdateMapPointersInRange(p->ObjectAreaStart(), p->AllocationTop());
     }
   }
 
   void UpdateMapPointersInNewSpace() {
     NewSpace* space = Heap::new_space();
     UpdateMapPointersInRange(space->bottom(), space->top());
   }
 
   void UpdateMapPointersInLargeObjectSpace() {
     LargeObjectIterator it(Heap::lo_space());
-    while (true) {
-      if (!it.has_next()) break;
-      UpdateMapPointersInObject(it.next());
-    }
+    for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
+      UpdateMapPointersInObject(obj);
   }
 
   void Finish() {
     Heap::map_space()->FinishCompaction(to_evacuate_start_, live_maps_);
   }
 
  private:
   int live_maps_;
   Address to_evacuate_start_;
   MapIterator vacant_map_it_;
(...skipping 22 matching lines...)
       if (!map_word.IsOverflowed()) return;
 
       *p = GetForwardedMap(map_word);
     }
   };
 
   static MapUpdatingVisitor map_updating_visitor_;
 
   static Map* NextMap(MapIterator* it, HeapObject* last, bool live) {
     while (true) {
-      it->has_next();  // Must be called for side-effects, see bug 586.
-      ASSERT(it->has_next());
       HeapObject* next = it->next();
+      ASSERT(next != NULL);
       if (next == last)
         return NULL;
       ASSERT(!next->IsOverflowed());
       ASSERT(!next->IsMarked());
       ASSERT(next->IsMap() || FreeListNode::IsFreeListNode(next));
       if (next->IsMap() == live)
         return reinterpret_cast<Map*>(next);
     }
   }
 
(...skipping 68 matching lines...)
       size = UpdateMapPointersInObject(object);
       ASSERT(size > 0);
     }
   }
 
 #ifdef DEBUG
   void CheckNoMapsToEvacuate() {
     if (!FLAG_enable_slow_asserts)
       return;
 
-    while (map_to_evacuate_it_.has_next())
-      ASSERT(FreeListNode::IsFreeListNode(map_to_evacuate_it_.next()));
+    for (HeapObject* obj = map_to_evacuate_it_.next();
+         obj != NULL; obj = map_to_evacuate_it_.next())
+      ASSERT(FreeListNode::IsFreeListNode(obj));
   }
 #endif
 };
 
 MapCompact::MapUpdatingVisitor MapCompact::map_updating_visitor_;
 
 
 void MarkCompactCollector::SweepSpaces() {
   ASSERT(state_ == SWEEP_SPACES);
   ASSERT(!IsCompacting());
(...skipping 12 matching lines...)
   ASSERT(live_map_objects_ == live_maps);
 
   if (Heap::map_space()->NeedsCompaction(live_maps)) {
     MapCompact map_compact(live_maps);
 
     map_compact.CompactMaps();
     map_compact.UpdateMapPointersInRoots();
 
     map_compact.FinishMapSpace();
     PagedSpaces spaces;
-    while (PagedSpace* space = spaces.next()) {
+    for (PagedSpace* space = spaces.next();
+         space != NULL; space = spaces.next()) {
       if (space == Heap::map_space()) continue;
       map_compact.UpdateMapPointersInPagedSpace(space);
     }
     map_compact.UpdateMapPointersInNewSpace();
     map_compact.UpdateMapPointersInLargeObjectSpace();
 
     map_compact.Finish();
   }
 }
 
(...skipping 154 matching lines...)
                                       &UpdatePointersInOldObject);
   int live_codes = IterateLiveObjects(Heap::code_space(),
                                       &UpdatePointersInOldObject);
   int live_cells = IterateLiveObjects(Heap::cell_space(),
                                       &UpdatePointersInOldObject);
   int live_news = IterateLiveObjects(Heap::new_space(),
                                      &UpdatePointersInNewObject);
 
   // Large objects do not move, the map word can be updated directly.
   LargeObjectIterator it(Heap::lo_space());
-  while (it.has_next()) UpdatePointersInNewObject(it.next());
+  for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
+    UpdatePointersInNewObject(obj);
 
   USE(live_maps);
   USE(live_pointer_olds);
   USE(live_data_olds);
   USE(live_codes);
   USE(live_cells);
   USE(live_news);
   ASSERT(live_maps == live_map_objects_);
   ASSERT(live_data_olds == live_old_data_objects_);
   ASSERT(live_pointer_olds == live_old_pointer_objects_);
(...skipping 143 matching lines...)
   Address mark = Heap::new_space()->bottom();
   Heap::new_space()->set_age_mark(mark);
 
   Heap::new_space()->MCCommitRelocationInfo();
 #ifdef DEBUG
   // It is safe to write to the remembered sets as remembered sets on a
   // page-by-page basis after committing the m-c forwarding pointer.
   Page::set_rset_state(Page::IN_USE);
 #endif
   PagedSpaces spaces;
-  while (PagedSpace* space = spaces.next()) space->MCCommitRelocationInfo();
+  for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
+    space->MCCommitRelocationInfo();
 }
 
 
 int MarkCompactCollector::RelocateMapObject(HeapObject* obj) {
   // Recover map pointer.
   MapWord encoding = obj->map_word();
   Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
   ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr)));
 
   // Get forwarding address before resetting map pointer
(...skipping 177 matching lines...)
 #ifdef ENABLE_LOGGING_AND_PROFILING
   if (obj->IsCode()) {
     LOG(CodeDeleteEvent(obj->address()));
   } else if (obj->IsJSFunction()) {
     LOG(FunctionDeleteEvent(obj->address()));
   }
 #endif
 }
 
 } }  // namespace v8::internal
