OLD | NEW |
1 // Copyright 2006-2008 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
11 // with the distribution. | 11 // with the distribution. |
(...skipping 26 matching lines...) |
38 #include "objects-visiting.h" | 38 #include "objects-visiting.h" |
39 #include "stub-cache.h" | 39 #include "stub-cache.h" |
40 | 40 |
41 namespace v8 { | 41 namespace v8 { |
42 namespace internal { | 42 namespace internal { |
43 | 43 |
44 // ------------------------------------------------------------------------- | 44 // ------------------------------------------------------------------------- |
45 // MarkCompactCollector | 45 // MarkCompactCollector |
46 | 46 |
47 bool MarkCompactCollector::force_compaction_ = false; | 47 bool MarkCompactCollector::force_compaction_ = false; |
| 48 bool MarkCompactCollector::sweep_precisely_ = false; |
48 bool MarkCompactCollector::compacting_collection_ = false; | 49 bool MarkCompactCollector::compacting_collection_ = false; |
49 bool MarkCompactCollector::compact_on_next_gc_ = false; | 50 bool MarkCompactCollector::compact_on_next_gc_ = false; |
50 | 51 |
51 GCTracer* MarkCompactCollector::tracer_ = NULL; | 52 GCTracer* MarkCompactCollector::tracer_ = NULL; |
52 | 53 |
53 #ifdef DEBUG | 54 #ifdef DEBUG |
54 MarkCompactCollector::CollectorState MarkCompactCollector::state_ = IDLE; | 55 MarkCompactCollector::CollectorState MarkCompactCollector::state_ = IDLE; |
55 | 56 |
56 // Counters used for debugging the marking phase of mark-compact or mark-sweep | 57 // Counters used for debugging the marking phase of mark-compact or mark-sweep |
57 // collection. | 58 // collection. |
(...skipping 42 matching lines...) |
100 ASSERT(Marking::IsMarked(object)); | 101 ASSERT(Marking::IsMarked(object)); |
101 } | 102 } |
102 } | 103 } |
103 } | 104 } |
104 }; | 105 }; |
105 | 106 |
106 | 107 |
107 static void VerifyMarking(Address bottom, Address top) { | 108 static void VerifyMarking(Address bottom, Address top) { |
108 VerifyMarkingVisitor visitor; | 109 VerifyMarkingVisitor visitor; |
109 HeapObject* object; | 110 HeapObject* object; |
| 111 Address next_object_must_be_here_or_later = bottom; |
110 | 112 |
111 for (Address current = bottom; | 113 for (Address current = bottom; |
112 current < top; | 114 current < top; |
113 current += object->Size()) { | 115 current += kPointerSize) { |
114 object = HeapObject::FromAddress(current); | 116 object = HeapObject::FromAddress(current); |
115 if (Marking::IsMarked(object)) object->Iterate(&visitor); | 117 if (Marking::IsMarked(object)) { |
| 118 ASSERT(current >= next_object_must_be_here_or_later); |
| 119 object->Iterate(&visitor); |
| 120 next_object_must_be_here_or_later = current + object->Size(); |
| 121 } |
116 } | 122 } |
117 } | 123 } |
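The rewritten loop above no longer assumes every word begins an object; it advances one word at a time and checks that each marked object starts at or after the end of the previous marked object. A stripped-down model of that invariant check, in plain C++ with made-up (start, size) pairs in word units rather than real heap addresses:

#include <cassert>

int main() {
  // Hypothetical marked objects: word offset of each start and its size.
  const int starts[] = {0, 4, 9};
  const int sizes[] = {4, 5, 2};
  int next_object_must_be_here_or_later = 0;
  for (int i = 0; i < 3; i++) {
    // A marked object may not overlap the previous marked object.
    assert(starts[i] >= next_object_must_be_here_or_later);
    next_object_must_be_here_or_later = starts[i] + sizes[i];
  }
  return 0;
}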
118 | 124 |
119 | 125 |
120 static void VerifyMarking(Page* p) { | 126 static void VerifyMarking(Page* p) { |
121 VerifyMarking(p->ObjectAreaStart(), p->AllocationTop()); | 127 VerifyMarking(p->ObjectAreaStart(), p->ObjectAreaEnd()); |
122 } | 128 } |
123 | 129 |
124 | 130 |
125 static void VerifyMarking(NewSpace* space) { | 131 static void VerifyMarking(NewSpace* space) { |
126 VerifyMarking(space->bottom(), space->top()); | 132 VerifyMarking(space->bottom(), space->top()); |
127 } | 133 } |
128 | 134 |
129 | 135 |
130 static void VerifyMarking(PagedSpace* space) { | 136 static void VerifyMarking(PagedSpace* space) { |
131 PageIterator it(space, PageIterator::PAGES_IN_USE); | 137 PageIterator it(space); |
132 | 138 |
133 while (it.has_next()) { | 139 while (it.has_next()) { |
134 VerifyMarking(it.next()); | 140 VerifyMarking(it.next()); |
135 } | 141 } |
136 } | 142 } |
137 | 143 |
138 | 144 |
139 static void VerifyMarking() { | 145 static void VerifyMarking() { |
140 VerifyMarking(Heap::old_pointer_space()); | 146 VerifyMarking(Heap::old_pointer_space()); |
141 VerifyMarking(Heap::old_data_space()); | 147 VerifyMarking(Heap::old_data_space()); |
(...skipping 42 matching lines...) |
184 // Check that we swept all marked objects and | 190 // Check that we swept all marked objects and |
185 // null out the GC tracer. | 191 // null out the GC tracer. |
186 // TODO(gc): does not work with conservative sweeping. | 192 // TODO(gc): does not work with conservative sweeping. |
187 // ASSERT(tracer_->marked_count() == 0); | 193 // ASSERT(tracer_->marked_count() == 0); |
188 tracer_ = NULL; | 194 tracer_ = NULL; |
189 } | 195 } |
190 | 196 |
191 | 197 |
192 #ifdef DEBUG | 198 #ifdef DEBUG |
193 static void VerifyMarkbitsAreClean(PagedSpace* space) { | 199 static void VerifyMarkbitsAreClean(PagedSpace* space) { |
194 PageIterator it(space, PageIterator::PAGES_IN_USE); | 200 PageIterator it(space); |
195 | 201 |
196 while (it.has_next()) { | 202 while (it.has_next()) { |
197 Page* p = it.next(); | 203 Page* p = it.next(); |
198 ASSERT(p->markbits()->IsClean()); | 204 ASSERT(p->markbits()->IsClean()); |
199 } | 205 } |
200 } | 206 } |
201 | 207 |
202 static void VerifyMarkbitsAreClean() { | 208 static void VerifyMarkbitsAreClean() { |
203 VerifyMarkbitsAreClean(Heap::old_pointer_space()); | 209 VerifyMarkbitsAreClean(Heap::old_pointer_space()); |
204 VerifyMarkbitsAreClean(Heap::old_data_space()); | 210 VerifyMarkbitsAreClean(Heap::old_data_space()); |
205 VerifyMarkbitsAreClean(Heap::code_space()); | 211 VerifyMarkbitsAreClean(Heap::code_space()); |
206 VerifyMarkbitsAreClean(Heap::cell_space()); | 212 VerifyMarkbitsAreClean(Heap::cell_space()); |
207 VerifyMarkbitsAreClean(Heap::map_space()); | 213 VerifyMarkbitsAreClean(Heap::map_space()); |
208 } | 214 } |
209 #endif | 215 #endif |
210 | 216 |
211 | 217 |
212 static void ClearMarkbits(PagedSpace* space) { | 218 static void ClearMarkbits(PagedSpace* space) { |
213 PageIterator it(space, PageIterator::PAGES_IN_USE); | 219 PageIterator it(space); |
214 | 220 |
215 while (it.has_next()) { | 221 while (it.has_next()) { |
216 Page* p = it.next(); | 222 Page* p = it.next(); |
217 p->markbits()->Clear(); | 223 p->markbits()->Clear(); |
218 } | 224 } |
219 } | 225 } |
220 | 226 |
| 227 |
221 static void ClearMarkbits() { | 228 static void ClearMarkbits() { |
222 // We are sweeping code and map spaces precisely so clearing is not required. | 229 // TODO(gc): Clean the mark bits while sweeping. |
| 230 ClearMarkbits(Heap::code_space()); |
| 231 ClearMarkbits(Heap::map_space()); |
223 ClearMarkbits(Heap::old_pointer_space()); | 232 ClearMarkbits(Heap::old_pointer_space()); |
224 ClearMarkbits(Heap::old_data_space()); | 233 ClearMarkbits(Heap::old_data_space()); |
225 ClearMarkbits(Heap::cell_space()); | 234 ClearMarkbits(Heap::cell_space()); |
226 } | 235 } |
227 | 236 |
228 | 237 |
229 void Marking::TransferMark(Address old_start, Address new_start) { | 238 void Marking::TransferMark(Address old_start, Address new_start) { |
230 if (old_start == new_start) return; | 239 if (old_start == new_start) return; |
231 | 240 |
232 if (!IncrementalMarking::IsStopped()) { | 241 if (!IncrementalMarking::IsStopped()) { |
233 if (IncrementalMarking::IsBlack(HeapObject::FromAddress(old_start))) { | 242 if (IncrementalMarking::IsBlack(HeapObject::FromAddress(old_start))) { |
234 IncrementalMarking::MarkBlack(HeapObject::FromAddress(new_start)); | 243 IncrementalMarking::MarkBlack(HeapObject::FromAddress(new_start)); |
235 ClearMark(old_start); | 244 ClearMark(old_start); |
236 } else if (IncrementalMarking::IsGrey(HeapObject::FromAddress(old_start))) { | 245 } else if (IncrementalMarking::IsGrey(HeapObject::FromAddress(old_start))) { |
237 ClearMark(old_start + kPointerSize); | 246 ClearMark(old_start + kPointerSize); |
238 IncrementalMarking::WhiteToGrey(HeapObject::FromAddress(new_start)); | 247 IncrementalMarking::WhiteToGrey(HeapObject::FromAddress(new_start)); |
239 IncrementalMarking::RestartIfNotMarking(); | 248 IncrementalMarking::RestartIfNotMarking(); |
240 // TODO(gc): if we shift a huge array in the loop we might end up pushing | 249 // TODO(gc): if we shift a huge array in the loop we might end up pushing |
241 // too much to the marking stack. Maybe we should check one or two elements | 250 // too much to the marking stack. Maybe we should check one or two elements |
242 // on top of it to see whether they are equal to old_start. | 251 // on top of it to see whether they are equal to old_start. |
243 } | 252 } |
244 } else { | 253 } else { |
245 if (Heap::InNewSpace(old_start) || | 254 if (Heap::InNewSpace(old_start) || !IsMarked(old_start)) { |
246 Page::FromAddress(old_start)->IsFlagSet(Page::IS_CONTINUOUS) || | |
247 !IsMarked(old_start)) { | |
248 return; | 255 return; |
249 } | 256 } |
250 SetMark(new_start); | 257 SetMark(new_start); |
251 } | 258 } |
252 } | 259 } |
253 | 260 |
254 | 261 |
255 void MarkCompactCollector::Prepare(GCTracer* tracer) { | 262 void MarkCompactCollector::Prepare(GCTracer* tracer) { |
256 FLAG_flush_code = false; | 263 FLAG_flush_code = false; |
257 FLAG_always_compact = false; | 264 FLAG_always_compact = false; |
(...skipping 82 matching lines...) |
340 // We compact the old generation on the next GC if it has gotten too | 347 // We compact the old generation on the next GC if it has gotten too |
341 // fragmented (i.e., we could recover an expected amount of space by | 348 // fragmented (i.e., we could recover an expected amount of space by |
342 // reclaiming the waste and free list blocks). | 349 // reclaiming the waste and free list blocks). |
343 static const int kFragmentationLimit = 15; // Percent. | 350 static const int kFragmentationLimit = 15; // Percent. |
344 static const int kFragmentationAllowed = 1 * MB; // Absolute. | 351 static const int kFragmentationAllowed = 1 * MB; // Absolute. |
345 intptr_t old_gen_recoverable = 0; | 352 intptr_t old_gen_recoverable = 0; |
346 intptr_t old_gen_used = 0; | 353 intptr_t old_gen_used = 0; |
347 | 354 |
348 OldSpaces spaces; | 355 OldSpaces spaces; |
349 for (OldSpace* space = spaces.next(); space != NULL; space = spaces.next()) { | 356 for (OldSpace* space = spaces.next(); space != NULL; space = spaces.next()) { |
350 old_gen_recoverable += space->Waste() + space->AvailableFree(); | 357 old_gen_recoverable += space->Waste() + space->Available(); |
351 old_gen_used += space->Size(); | 358 old_gen_used += space->Size(); |
352 } | 359 } |
353 | 360 |
354 int old_gen_fragmentation = | 361 int old_gen_fragmentation = |
355 static_cast<int>((old_gen_recoverable * 100.0) / old_gen_used); | 362 static_cast<int>((old_gen_recoverable * 100.0) / old_gen_used); |
356 if (old_gen_fragmentation > kFragmentationLimit && | 363 if (old_gen_fragmentation > kFragmentationLimit && |
357 old_gen_recoverable > kFragmentationAllowed) { | 364 old_gen_recoverable > kFragmentationAllowed) { |
358 compact_on_next_gc_ = true; | 365 compact_on_next_gc_ = true; |
359 } | 366 } |
360 } | 367 } |
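For intuition on the two thresholds: both the relative and the absolute condition must hold before compaction is scheduled. A minimal standalone sketch of the same check, with invented byte counts standing in for the real Waste(), Available(), and Size() statistics:

#include <cstdint>
#include <cstdio>

int main() {
  const int kFragmentationLimit = 15;             // Percent.
  const int64_t kFragmentationAllowed = 1 << 20;  // 1 MB, absolute.
  int64_t old_gen_recoverable = 2 << 20;          // Pretend Waste() + Available().
  int64_t old_gen_used = 10 << 20;                // Pretend Size().
  int old_gen_fragmentation =
      static_cast<int>((old_gen_recoverable * 100.0) / old_gen_used);
  // Here 20% > 15% and 2 MB > 1 MB, so compaction would be requested.
  bool compact_on_next_gc =
      old_gen_fragmentation > kFragmentationLimit &&
      old_gen_recoverable > kFragmentationAllowed;
  printf("fragmentation: %d%%, compact on next GC: %s\n",
         old_gen_fragmentation, compact_on_next_gc ? "yes" : "no");
  return 0;
}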
(...skipping 214 matching lines...) |
575 &FlexibleBodyVisitor<StaticMarkingVisitor, | 582 &FlexibleBodyVisitor<StaticMarkingVisitor, |
576 FixedArray::BodyDescriptor, | 583 FixedArray::BodyDescriptor, |
577 void>::Visit); | 584 void>::Visit); |
578 | 585 |
579 table_.Register(kVisitGlobalContext, | 586 table_.Register(kVisitGlobalContext, |
580 &FixedBodyVisitor<StaticMarkingVisitor, | 587 &FixedBodyVisitor<StaticMarkingVisitor, |
581 Context::MarkCompactBodyDescriptor, | 588 Context::MarkCompactBodyDescriptor, |
582 void>::Visit); | 589 void>::Visit); |
583 | 590 |
584 table_.Register(kVisitByteArray, &DataObjectVisitor::Visit); | 591 table_.Register(kVisitByteArray, &DataObjectVisitor::Visit); |
| 592 table_.Register(kVisitFreeSpace, &DataObjectVisitor::Visit); |
585 table_.Register(kVisitSeqAsciiString, &DataObjectVisitor::Visit); | 593 table_.Register(kVisitSeqAsciiString, &DataObjectVisitor::Visit); |
586 table_.Register(kVisitSeqTwoByteString, &DataObjectVisitor::Visit); | 594 table_.Register(kVisitSeqTwoByteString, &DataObjectVisitor::Visit); |
587 | 595 |
588 table_.Register(kVisitOddball, | 596 table_.Register(kVisitOddball, |
589 &FixedBodyVisitor<StaticMarkingVisitor, | 597 &FixedBodyVisitor<StaticMarkingVisitor, |
590 Oddball::BodyDescriptor, | 598 Oddball::BodyDescriptor, |
591 void>::Visit); | 599 void>::Visit); |
592 table_.Register(kVisitMap, | 600 table_.Register(kVisitMap, |
593 &FixedBodyVisitor<StaticMarkingVisitor, | 601 &FixedBodyVisitor<StaticMarkingVisitor, |
594 Map::BodyDescriptor, | 602 Map::BodyDescriptor, |
(...skipping 646 matching lines...) |
1241 } | 1249 } |
1242 } | 1250 } |
1243 // The DescriptorArray descriptors contains a pointer to its contents array, | 1251 // The DescriptorArray descriptors contains a pointer to its contents array, |
1244 // but the contents array is already marked. | 1252 // but the contents array is already marked. |
1245 marking_stack.Push(descriptors); | 1253 marking_stack.Push(descriptors); |
1246 } | 1254 } |
1247 | 1255 |
1248 | 1256 |
1249 void MarkCompactCollector::CreateBackPointers() { | 1257 void MarkCompactCollector::CreateBackPointers() { |
1250 HeapObjectIterator iterator(Heap::map_space()); | 1258 HeapObjectIterator iterator(Heap::map_space()); |
1251 for (HeapObject* next_object = iterator.next(); | 1259 for (HeapObject* next_object = iterator.Next(); |
1252 next_object != NULL; next_object = iterator.next()) { | 1260 next_object != NULL; next_object = iterator.Next()) { |
1253 if (next_object->IsMap()) { // Could also be ByteArray on free list. | 1261 if (next_object->IsMap()) { // Could also be a FreeSpace object on the free list. |
1254 Map* map = Map::cast(next_object); | 1262 Map* map = Map::cast(next_object); |
1255 if (map->instance_type() >= FIRST_JS_OBJECT_TYPE && | 1263 if (map->instance_type() >= FIRST_JS_OBJECT_TYPE && |
1256 map->instance_type() <= JS_FUNCTION_TYPE) { | 1264 map->instance_type() <= JS_FUNCTION_TYPE) { |
1257 map->CreateBackPointers(); | 1265 map->CreateBackPointers(); |
1258 } else { | 1266 } else { |
1259 ASSERT(map->instance_descriptors() == Heap::empty_descriptor_array()); | 1267 ASSERT(map->instance_descriptors() == Heap::empty_descriptor_array()); |
1260 } | 1268 } |
1261 } | 1269 } |
1262 } | 1270 } |
1263 } | 1271 } |
(...skipping 301 matching lines...) |
1565 // Iterate over the map space, setting map transitions that go from | 1573 // Iterate over the map space, setting map transitions that go from |
1566 // a marked map to an unmarked map to null transitions. At the same time, | 1574 // a marked map to an unmarked map to null transitions. At the same time, |
1567 // set all the prototype fields of maps back to their original value, | 1575 // set all the prototype fields of maps back to their original value, |
1568 // dropping the back pointers temporarily stored in the prototype field. | 1576 // dropping the back pointers temporarily stored in the prototype field. |
1569 // Setting the prototype field requires following the linked list of | 1577 // Setting the prototype field requires following the linked list of |
1570 // back pointers, reversing them all at once. This allows us to find | 1578 // back pointers, reversing them all at once. This allows us to find |
1571 // those maps with map transitions that need to be nulled, and only | 1579 // those maps with map transitions that need to be nulled, and only |
1572 // scan the descriptor arrays of those maps, not all maps. | 1580 // scan the descriptor arrays of those maps, not all maps. |
1573 // All of these actions are carried out only on maps of JSObjects | 1581 // All of these actions are carried out only on maps of JSObjects |
1574 // and related subtypes. | 1582 // and related subtypes. |
1575 for (HeapObject* obj = map_iterator.next(); | 1583 for (HeapObject* obj = map_iterator.Next(); |
1576 obj != NULL; obj = map_iterator.next()) { | 1584 obj != NULL; obj = map_iterator.Next()) { |
1577 Map* map = reinterpret_cast<Map*>(obj); | 1585 Map* map = reinterpret_cast<Map*>(obj); |
1578 if (!Marking::IsMarked(map) && map->IsByteArray()) continue; | 1586 if (!Marking::IsMarked(map) && map->IsFreeSpace()) continue; |
1579 | 1587 |
1580 ASSERT(SafeIsMap(map)); | 1588 ASSERT(SafeIsMap(map)); |
1581 // Only JSObject and subtypes have map transitions and back pointers. | 1589 // Only JSObject and subtypes have map transitions and back pointers. |
1582 if (map->instance_type() < FIRST_JS_OBJECT_TYPE) continue; | 1590 if (map->instance_type() < FIRST_JS_OBJECT_TYPE) continue; |
1583 if (map->instance_type() > JS_FUNCTION_TYPE) continue; | 1591 if (map->instance_type() > JS_FUNCTION_TYPE) continue; |
1584 | 1592 |
1585 if (Marking::IsMarked(map) && | 1593 if (Marking::IsMarked(map) && |
1586 map->attached_to_shared_function_info()) { | 1594 map->attached_to_shared_function_info()) { |
1587 // This map is used for inobject slack tracking and has been detached | 1595 // This map is used for inobject slack tracking and has been detached |
1588 // from SharedFunctionInfo during the mark phase. | 1596 // from SharedFunctionInfo during the mark phase. |
(...skipping 242 matching lines...) |
1831 Heap::IterateRoots(&updating_visitor, VISIT_ALL_IN_SCAVENGE); | 1839 Heap::IterateRoots(&updating_visitor, VISIT_ALL_IN_SCAVENGE); |
1832 LiveObjectList::IterateElements(&updating_visitor); | 1840 LiveObjectList::IterateElements(&updating_visitor); |
1833 | 1841 |
1834 { | 1842 { |
1835 StoreBufferRebuildScope scope; | 1843 StoreBufferRebuildScope scope; |
1836 StoreBuffer::IteratePointersToNewSpace(&UpdatePointerToNewGen); | 1844 StoreBuffer::IteratePointersToNewSpace(&UpdatePointerToNewGen); |
1837 } | 1845 } |
1838 | 1846 |
1839 // Update pointers from cells. | 1847 // Update pointers from cells. |
1840 HeapObjectIterator cell_iterator(Heap::cell_space()); | 1848 HeapObjectIterator cell_iterator(Heap::cell_space()); |
1841 for (HeapObject* cell = cell_iterator.next(); | 1849 for (HeapObject* cell = cell_iterator.Next(); |
1842 cell != NULL; | 1850 cell != NULL; |
1843 cell = cell_iterator.next()) { | 1851 cell = cell_iterator.Next()) { |
1844 if (cell->IsJSGlobalPropertyCell()) { | 1852 if (cell->IsJSGlobalPropertyCell()) { |
1845 Address value_address = | 1853 Address value_address = |
1846 reinterpret_cast<Address>(cell) + | 1854 reinterpret_cast<Address>(cell) + |
1847 (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag); | 1855 (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag); |
1848 updating_visitor.VisitPointer(reinterpret_cast<Object**>(value_address)); | 1856 updating_visitor.VisitPointer(reinterpret_cast<Object**>(value_address)); |
1849 } | 1857 } |
1850 } | 1858 } |
1851 | 1859 |
1852 // Update pointer from the global contexts list. | 1860 // Update pointer from the global contexts list. |
1853 updating_visitor.VisitPointer(Heap::global_contexts_list_address()); | 1861 updating_visitor.VisitPointer(Heap::global_contexts_list_address()); |
(...skipping 27 matching lines...) |
1881 ASSERT(cells[free_cell_index] == 0); | 1889 ASSERT(cells[free_cell_index] == 0); |
1882 while (free_cell_index < region_end && cells[free_cell_index] == 0) { | 1890 while (free_cell_index < region_end && cells[free_cell_index] == 0) { |
1883 free_cell_index++; | 1891 free_cell_index++; |
1884 } | 1892 } |
1885 | 1893 |
1886 if (free_cell_index >= region_end) { | 1894 if (free_cell_index >= region_end) { |
1887 return free_cell_index; | 1895 return free_cell_index; |
1888 } | 1896 } |
1889 | 1897 |
1890 uint32_t free_end = Page::MarkbitsBitmap::CellToIndex(free_cell_index); | 1898 uint32_t free_end = Page::MarkbitsBitmap::CellToIndex(free_cell_index); |
1891 space->DeallocateBlock(p->MarkbitIndexToAddress(free_start), | 1899 space->Free(p->MarkbitIndexToAddress(free_start), |
1892 (free_end - free_start) << kPointerSizeLog2, | 1900 (free_end - free_start) << kPointerSizeLog2); |
1893 true); | |
1894 | 1901 |
1895 return free_cell_index; | 1902 return free_cell_index; |
1896 } | 1903 } |
1897 | 1904 |
1898 | 1905 |
1899 INLINE(static uint32_t NextCandidate(uint32_t cell_index, | 1906 INLINE(static uint32_t NextCandidate(uint32_t cell_index, |
1900 uint32_t last_cell_index, | 1907 uint32_t last_cell_index, |
1901 uint32_t* cells)); | 1908 uint32_t* cells)); |
1902 | 1909 |
1903 | 1910 |
(...skipping 22 matching lines...) |
1926 CompilerIntrinsics::CountLeadingZeros(cells[cell_index - 1]) + 1; | 1933 CompilerIntrinsics::CountLeadingZeros(cells[cell_index - 1]) + 1; |
1927 Address addr = | 1934 Address addr = |
1928 p->MarkbitIndexToAddress( | 1935 p->MarkbitIndexToAddress( |
1929 Page::MarkbitsBitmap::CellToIndex(cell_index) - leading_zeroes); | 1936 Page::MarkbitsBitmap::CellToIndex(cell_index) - leading_zeroes); |
1930 HeapObject* obj = HeapObject::FromAddress(addr); | 1937 HeapObject* obj = HeapObject::FromAddress(addr); |
1931 ASSERT(obj->map()->IsMap()); | 1938 ASSERT(obj->map()->IsMap()); |
1932 return (obj->Size() >> kPointerSizeLog2) - leading_zeroes; | 1939 return (obj->Size() >> kPointerSizeLog2) - leading_zeroes; |
1933 } | 1940 } |
1934 | 1941 |
1935 | 1942 |
| 1943 static const int kStartTableEntriesPerLine = 5; |
| 1944 static const int kStartTableLines = 171; |
| 1945 static const int kStartTableInvalidLine = 127; |
| 1946 static const int kStartTableUnusedEntry = 126; |
| 1947 |
| 1948 #define _ kStartTableUnusedEntry |
| 1949 #define X kStartTableInvalidLine |
| 1950 // Mark-bit to object start offset table. |
| 1951 // |
| 1952 // The line is indexed by the mark bits in a byte. The first number on |
| 1953 // the line describes the number of live object starts for the line and the |
| 1954 // other numbers on the line describe the offsets (in words) of the object |
| 1955 // starts. |
| 1956 // |
| 1957 // Since objects are at least 2 words large we don't have entries for two |
| 1958 // consecutive 1 bits. All byte values above 170 have at least 2 consecutive 1 bits. |
| 1959 char kStartTable[kStartTableLines * kStartTableEntriesPerLine] = { |
| 1960 0, _, _, _, _, // 0 |
| 1961 1, 0, _, _, _, // 1 |
| 1962 1, 1, _, _, _, // 2 |
| 1963 X, _, _, _, _, // 3 |
| 1964 1, 2, _, _, _, // 4 |
| 1965 2, 0, 2, _, _, // 5 |
| 1966 X, _, _, _, _, // 6 |
| 1967 X, _, _, _, _, // 7 |
| 1968 1, 3, _, _, _, // 8 |
| 1969 2, 0, 3, _, _, // 9 |
| 1970 2, 1, 3, _, _, // 10 |
| 1971 X, _, _, _, _, // 11 |
| 1972 X, _, _, _, _, // 12 |
| 1973 X, _, _, _, _, // 13 |
| 1974 X, _, _, _, _, // 14 |
| 1975 X, _, _, _, _, // 15 |
| 1976 1, 4, _, _, _, // 16 |
| 1977 2, 0, 4, _, _, // 17 |
| 1978 2, 1, 4, _, _, // 18 |
| 1979 X, _, _, _, _, // 19 |
| 1980 2, 2, 4, _, _, // 20 |
| 1981 3, 0, 2, 4, _, // 21 |
| 1982 X, _, _, _, _, // 22 |
| 1983 X, _, _, _, _, // 23 |
| 1984 X, _, _, _, _, // 24 |
| 1985 X, _, _, _, _, // 25 |
| 1986 X, _, _, _, _, // 26 |
| 1987 X, _, _, _, _, // 27 |
| 1988 X, _, _, _, _, // 28 |
| 1989 X, _, _, _, _, // 29 |
| 1990 X, _, _, _, _, // 30 |
| 1991 X, _, _, _, _, // 31 |
| 1992 1, 5, _, _, _, // 32 |
| 1993 2, 0, 5, _, _, // 33 |
| 1994 2, 1, 5, _, _, // 34 |
| 1995 X, _, _, _, _, // 35 |
| 1996 2, 2, 5, _, _, // 36 |
| 1997 3, 0, 2, 5, _, // 37 |
| 1998 X, _, _, _, _, // 38 |
| 1999 X, _, _, _, _, // 39 |
| 2000 2, 3, 5, _, _, // 40 |
| 2001 3, 0, 3, 5, _, // 41 |
| 2002 3, 1, 3, 5, _, // 42 |
| 2003 X, _, _, _, _, // 43 |
| 2004 X, _, _, _, _, // 44 |
| 2005 X, _, _, _, _, // 45 |
| 2006 X, _, _, _, _, // 46 |
| 2007 X, _, _, _, _, // 47 |
| 2008 X, _, _, _, _, // 48 |
| 2009 X, _, _, _, _, // 49 |
| 2010 X, _, _, _, _, // 50 |
| 2011 X, _, _, _, _, // 51 |
| 2012 X, _, _, _, _, // 52 |
| 2013 X, _, _, _, _, // 53 |
| 2014 X, _, _, _, _, // 54 |
| 2015 X, _, _, _, _, // 55 |
| 2016 X, _, _, _, _, // 56 |
| 2017 X, _, _, _, _, // 57 |
| 2018 X, _, _, _, _, // 58 |
| 2019 X, _, _, _, _, // 59 |
| 2020 X, _, _, _, _, // 60 |
| 2021 X, _, _, _, _, // 61 |
| 2022 X, _, _, _, _, // 62 |
| 2023 X, _, _, _, _, // 63 |
| 2024 1, 6, _, _, _, // 64 |
| 2025 2, 0, 6, _, _, // 65 |
| 2026 2, 1, 6, _, _, // 66 |
| 2027 X, _, _, _, _, // 67 |
| 2028 2, 2, 6, _, _, // 68 |
| 2029 3, 0, 2, 6, _, // 69 |
| 2030 X, _, _, _, _, // 70 |
| 2031 X, _, _, _, _, // 71 |
| 2032 2, 3, 6, _, _, // 72 |
| 2033 3, 0, 3, 6, _, // 73 |
| 2034 3, 1, 3, 6, _, // 74 |
| 2035 X, _, _, _, _, // 75 |
| 2036 X, _, _, _, _, // 76 |
| 2037 X, _, _, _, _, // 77 |
| 2038 X, _, _, _, _, // 78 |
| 2039 X, _, _, _, _, // 79 |
| 2040 2, 4, 6, _, _, // 80 |
| 2041 3, 0, 4, 6, _, // 81 |
| 2042 3, 1, 4, 6, _, // 82 |
| 2043 X, _, _, _, _, // 83 |
| 2044 3, 2, 4, 6, _, // 84 |
| 2045 4, 0, 2, 4, 6, // 85 |
| 2046 X, _, _, _, _, // 86 |
| 2047 X, _, _, _, _, // 87 |
| 2048 X, _, _, _, _, // 88 |
| 2049 X, _, _, _, _, // 89 |
| 2050 X, _, _, _, _, // 90 |
| 2051 X, _, _, _, _, // 91 |
| 2052 X, _, _, _, _, // 92 |
| 2053 X, _, _, _, _, // 93 |
| 2054 X, _, _, _, _, // 94 |
| 2055 X, _, _, _, _, // 95 |
| 2056 X, _, _, _, _, // 96 |
| 2057 X, _, _, _, _, // 97 |
| 2058 X, _, _, _, _, // 98 |
| 2059 X, _, _, _, _, // 99 |
| 2060 X, _, _, _, _, // 100 |
| 2061 X, _, _, _, _, // 101 |
| 2062 X, _, _, _, _, // 102 |
| 2063 X, _, _, _, _, // 103 |
| 2064 X, _, _, _, _, // 104 |
| 2065 X, _, _, _, _, // 105 |
| 2066 X, _, _, _, _, // 106 |
| 2067 X, _, _, _, _, // 107 |
| 2068 X, _, _, _, _, // 108 |
| 2069 X, _, _, _, _, // 109 |
| 2070 X, _, _, _, _, // 110 |
| 2071 X, _, _, _, _, // 111 |
| 2072 X, _, _, _, _, // 112 |
| 2073 X, _, _, _, _, // 113 |
| 2074 X, _, _, _, _, // 114 |
| 2075 X, _, _, _, _, // 115 |
| 2076 X, _, _, _, _, // 116 |
| 2077 X, _, _, _, _, // 117 |
| 2078 X, _, _, _, _, // 118 |
| 2079 X, _, _, _, _, // 119 |
| 2080 X, _, _, _, _, // 120 |
| 2081 X, _, _, _, _, // 121 |
| 2082 X, _, _, _, _, // 122 |
| 2083 X, _, _, _, _, // 123 |
| 2084 X, _, _, _, _, // 124 |
| 2085 X, _, _, _, _, // 125 |
| 2086 X, _, _, _, _, // 126 |
| 2087 X, _, _, _, _, // 127 |
| 2088 1, 7, _, _, _, // 128 |
| 2089 2, 0, 7, _, _, // 129 |
| 2090 2, 1, 7, _, _, // 130 |
| 2091 X, _, _, _, _, // 131 |
| 2092 2, 2, 7, _, _, // 132 |
| 2093 3, 0, 2, 7, _, // 133 |
| 2094 X, _, _, _, _, // 134 |
| 2095 X, _, _, _, _, // 135 |
| 2096 2, 3, 7, _, _, // 136 |
| 2097 3, 0, 3, 7, _, // 137 |
| 2098 3, 1, 3, 7, _, // 138 |
| 2099 X, _, _, _, _, // 139 |
| 2100 X, _, _, _, _, // 140 |
| 2101 X, _, _, _, _, // 141 |
| 2102 X, _, _, _, _, // 142 |
| 2103 X, _, _, _, _, // 143 |
| 2104 2, 4, 7, _, _, // 144 |
| 2105 3, 0, 4, 7, _, // 145 |
| 2106 3, 1, 4, 7, _, // 146 |
| 2107 X, _, _, _, _, // 147 |
| 2108 3, 2, 4, 7, _, // 148 |
| 2109 4, 0, 2, 4, 7, // 149 |
| 2110 X, _, _, _, _, // 150 |
| 2111 X, _, _, _, _, // 151 |
| 2112 X, _, _, _, _, // 152 |
| 2113 X, _, _, _, _, // 153 |
| 2114 X, _, _, _, _, // 154 |
| 2115 X, _, _, _, _, // 155 |
| 2116 X, _, _, _, _, // 156 |
| 2117 X, _, _, _, _, // 157 |
| 2118 X, _, _, _, _, // 158 |
| 2119 X, _, _, _, _, // 159 |
| 2120 2, 5, 7, _, _, // 160 |
| 2121 3, 0, 5, 7, _, // 161 |
| 2122 3, 1, 5, 7, _, // 162 |
| 2123 X, _, _, _, _, // 163 |
| 2124 3, 2, 5, 7, _, // 164 |
| 2125 4, 0, 2, 5, 7, // 165 |
| 2126 X, _, _, _, _, // 166 |
| 2127 X, _, _, _, _, // 167 |
| 2128 3, 3, 5, 7, _, // 168 |
| 2129 4, 0, 3, 5, 7, // 169 |
| 2130 4, 1, 3, 5, 7 // 170 |
| 2131 }; |
| 2132 #undef _ |
| 2133 #undef X |
| 2134 |
| 2135 |
| 2136 // Takes a word of mark bits. Returns the number of objects that start in the |
| 2137 // range. Puts the offsets of the words in the supplied array. |
| 2138 static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts) { |
| 2139 int objects = 0; |
| 2140 int offset = 0; |
| 2141 |
| 2142 // No consecutive 1 bits. |
| 2143 ASSERT((mark_bits & 0x180) != 0x180); |
| 2144 ASSERT((mark_bits & 0x18000) != 0x18000); |
| 2145 ASSERT((mark_bits & 0x1800000) != 0x1800000); |
| 2146 |
| 2147 while (mark_bits != 0) { |
| 2148 int byte = (mark_bits & 0xff); |
| 2149 mark_bits >>= 8; |
| 2150 if (byte != 0) { |
| 2151 ASSERT(byte < kStartTableLines); // No consecutive 1 bits. |
| 2152 char* table = kStartTable + byte * kStartTableEntriesPerLine; |
| 2153 int objects_in_these_8_words = table[0]; |
| 2154 ASSERT(objects_in_these_8_words != kStartTableInvalidLine); |
| 2155 ASSERT(objects_in_these_8_words < kStartTableEntriesPerLine); |
| 2156 for (int i = 0; i < objects_in_these_8_words; i++) { |
| 2157 starts[objects++] = offset + table[1 + i]; |
| 2158 } |
| 2159 } |
| 2160 offset += 8; |
| 2161 } |
| 2162 return objects; |
| 2163 } |
| 2164 |
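As a cross-check on the table encoding: every set bit in a mark word is the start of a live object, at that bit's word offset from the block base, so the table lookup is equivalent to enumerating set bits eight at a time. A slow reference version (the Slow name is invented) with a spot check against line 65 of kStartTable above:

#include <cassert>
#include <cstdint>

static int MarkWordToObjectStartsSlow(uint32_t mark_bits, int* starts) {
  int objects = 0;
  for (int bit = 0; bit < 32; bit++) {
    if ((mark_bits >> bit) & 1) starts[objects++] = bit;
  }
  return objects;
}

int main() {
  int starts[16];
  // 0x41 has bits 0 and 6 set: two objects at word offsets 0 and 6,
  // matching table line 65, which reads "2, 0, 6".
  int n = MarkWordToObjectStartsSlow(0x41u, starts);
  assert(n == 2 && starts[0] == 0 && starts[1] == 6);
  return 0;
}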
| 2165 |
| 2166 static inline Address DigestFreeStart(Address approximate_free_start, |
| 2167 uint32_t free_start_cell) { |
| 2168 ASSERT(free_start_cell != 0); |
| 2169 |
| 2170 int offsets[16]; |
| 2171 uint32_t cell = free_start_cell; |
| 2172 int offset_of_last_live; |
| 2173 if ((cell & 0x80000000u) != 0) { |
| 2174 // This case would overflow below. |
| 2175 offset_of_last_live = 31; |
| 2176 } else { |
| 2177 // Remove all but one bit, the most significant. This is an optimization |
| 2178 // that may or may not be worthwhile. |
| 2179 cell |= cell >> 16; |
| 2180 cell |= cell >> 8; |
| 2181 cell |= cell >> 4; |
| 2182 cell |= cell >> 2; |
| 2183 cell |= cell >> 1; |
| 2184 cell = (cell + 1) >> 1; |
| 2185 int live_objects = MarkWordToObjectStarts(cell, offsets); |
| 2186 ASSERT(live_objects == 1); |
| 2187 offset_of_last_live = offsets[live_objects - 1]; |
| 2188 } |
| 2189 Address last_live_start = |
| 2190 approximate_free_start + offset_of_last_live * kPointerSize; |
| 2191 HeapObject* last_live = HeapObject::FromAddress(last_live_start); |
| 2192 Address free_start = last_live_start + last_live->Size(); |
| 2193 return free_start; |
| 2194 } |
| 2195 |
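The bit smear in the else branch above is worth seeing in isolation: ORing the cell with shifted copies of itself fills every position below the most significant set bit, after which (cell + 1) >> 1 keeps only that bit. A sketch with an invented helper name:

#include <cassert>
#include <cstdint>

static uint32_t IsolateHighestBit(uint32_t cell) {
  assert((cell & 0x80000000u) == 0);  // The top bit would overflow the +1.
  cell |= cell >> 16;
  cell |= cell >> 8;
  cell |= cell >> 4;
  cell |= cell >> 2;
  cell |= cell >> 1;       // All bits at or below the MSB are now set.
  return (cell + 1) >> 1;  // E.g. 0x0001FFFF -> 0x00010000.
}

int main() {
  assert(IsolateHighestBit(0x00012340u) == 0x00010000u);
  assert(IsolateHighestBit(1u) == 1u);
  return 0;
}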
| 2196 |
| 2197 static inline Address StartOfLiveObject(Address block_address, uint32_t cell) { |
| 2198 ASSERT(cell != 0); |
| 2199 |
| 2200 int offsets[16]; |
| 2201 if (cell == 0x80000000u) { // Avoid overflow below. |
| 2202 return block_address + 31 * kPointerSize; |
| 2203 } |
| 2204 uint32_t first_set_bit = ((cell ^ (cell - 1)) + 1) >> 1; |
| 2205 ASSERT((first_set_bit & cell) == first_set_bit); |
| 2206 int live_objects = MarkWordToObjectStarts(first_set_bit, offsets); |
| 2207 ASSERT(live_objects == 1); |
| 2208 USE(live_objects); |
| 2209 return block_address + offsets[0] * kPointerSize; |
| 2210 } |
| 2211 |
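StartOfLiveObject uses the complementary trick: cell ^ (cell - 1) produces the lowest set bit together with every bit below it, and adding one then shifting right leaves just the lowest set bit. A sketch (invented helper name; the 0x80000000u guard mirrors the overflow special case above):

#include <cassert>
#include <cstdint>

static uint32_t IsolateLowestBit(uint32_t cell) {
  assert(cell != 0 && cell != 0x80000000u);
  uint32_t bit = ((cell ^ (cell - 1)) + 1) >> 1;
  assert(bit == (cell & (~cell + 1)));  // The usual two's-complement form.
  return bit;
}

int main() {
  assert(IsolateLowestBit(0x28u) == 0x08u);  // 0b101000 -> 0b001000.
  return 0;
}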
| 2212 |
| 2213 // Sweeps a space conservatively. After this has been done the larger free |
| 2214 // spaces have been put on the free list and the smaller ones have been |
| 2215 // ignored and left untouched. A free space is always either ignored or put |
| 2216 // on the free list, never split up into two parts. This is important |
| 2217 // because it means that any FreeSpace maps left actually describe a region of |
| 2218 // memory that can be ignored when scanning. Dead objects other than free |
| 2219 // spaces will not contain the free space map. |
1936 static void SweepConservatively(PagedSpace* space, | 2220 static void SweepConservatively(PagedSpace* space, |
1937 Page* p, | 2221 Page* p) { |
1938 Address* last_free_start) { | |
1939 typedef Page::MarkbitsBitmap::CellType CellType; | |
1940 Page::MarkbitsBitmap* markbits = p->markbits(); | 2222 Page::MarkbitsBitmap* markbits = p->markbits(); |
1941 CellType* cells = markbits->cells(); | 2223 Page::MarkbitsBitmap::CellType* cells = markbits->cells(); |
1942 | 2224 |
1943 uint32_t last_cell_index = | 2225 p->SetFlag(MemoryChunk::WAS_SWEPT_CONSERVATIVELY); |
| 2226 |
| 2227 // This is the start of the 32-word block that we are currently looking at. |
| 2228 Address block_address = p->ObjectAreaStart(); |
| 2229 |
| 2230 int last_cell_index = |
1944 Page::MarkbitsBitmap::IndexToCell( | 2231 Page::MarkbitsBitmap::IndexToCell( |
1945 Page::MarkbitsBitmap::CellAlignIndex( | 2232 Page::MarkbitsBitmap::CellAlignIndex( |
1946 p->AddressToMarkbitIndex(p->AllocationTop()))); | 2233 p->AddressToMarkbitIndex(p->ObjectAreaEnd()))); |
1947 | 2234 |
1948 uint32_t cell_index = Page::kFirstUsedCell; | 2235 int cell_index = Page::kFirstUsedCell; |
1949 uint32_t polluted_cell_index = Page::kFirstUsedCell; | 2236 |
1950 if (cells[cell_index] == 0) { | 2237 // Skip over all the dead objects at the start of the page and mark them free. |
1951 polluted_cell_index = | 2238 for (cell_index = Page::kFirstUsedCell; |
1952 SweepFree(space, | 2239 cell_index < last_cell_index; |
1953 p, | 2240 cell_index++, block_address += 32 * kPointerSize) { |
1954 p->AddressToMarkbitIndex(p->ObjectAreaStart()), | 2241 if (cells[cell_index] != 0) break; |
1955 last_cell_index, | 2242 } |
1956 cells); | 2243 int size = static_cast<int>(block_address - p->ObjectAreaStart()); |
1957 | 2244 if (cell_index == last_cell_index) { |
1958 if (polluted_cell_index >= last_cell_index) { | 2245 space->Free(p->ObjectAreaStart(), size); |
1959 // All cells are free. | 2246 return; |
1960 *last_free_start = p->ObjectAreaStart(); | 2247 } |
1961 return; | 2248 // Grow the size of the start-of-page free space a little to get up to the |
| 2249 // first live object. |
| 2250 Address free_end = StartOfLiveObject(block_address, cells[cell_index]); |
| 2251 // Free the first free space. |
| 2252 size = static_cast<int>(free_end - p->ObjectAreaStart()); |
| 2253 space->Free(p->ObjectAreaStart(), size); |
| 2254 // The start of the current free area is represented in undigested form by |
| 2255 // the address of the last 32-word section that contained a live object and |
| 2256 // the marking bitmap for that cell, which describes where the live object |
| 2257 // started. Unless we find a large free space in the bitmap we will not |
| 2258 // digest this pair into a real address. We start the iteration here at the |
| 2259 // first word in the marking bit map that indicates a live object. |
| 2260 Address free_start = block_address; |
| 2261 uint32_t free_start_cell = cells[cell_index]; |
| 2262 |
| 2263 for ( ; |
| 2264 cell_index < last_cell_index; |
| 2265 cell_index++, block_address += 32 * kPointerSize) { |
| 2266 ASSERT((unsigned)cell_index == |
| 2267 Page::MarkbitsBitmap::IndexToCell( |
| 2268 Page::MarkbitsBitmap::CellAlignIndex( |
| 2269 p->AddressToMarkbitIndex(block_address)))); |
| 2270 uint32_t cell = cells[cell_index]; |
| 2271 if (cell != 0) { |
| 2272 // We have a live object. Check approximately whether it is more than 32 |
| 2273 // words since the last live object. |
| 2274 if (block_address - free_start > 32 * kPointerSize) { |
| 2275 free_start = DigestFreeStart(free_start, free_start_cell); |
| 2276 if (block_address - free_start > 32 * kPointerSize) { |
| 2277 // Now that we know the exact start of the free space it still looks |
| 2278 // like we have a large enough free space to be worth bothering with, |
| 2279 // so now we need to find the start of the first live object at the |
| 2280 // end of the free space. |
| 2281 free_end = StartOfLiveObject(block_address, cell); |
| 2282 space->Free(free_start, free_end - free_start); |
| 2283 } |
| 2284 } |
| 2285 // Update our undigested record of where the current free area started. |
| 2286 free_start = block_address; |
| 2287 free_start_cell = cell; |
1962 } | 2288 } |
1963 } | 2289 } |
1964 | 2290 |
1965 p->ClearFlag(Page::IS_CONTINUOUS); | 2291 // Handle the free space at the end of the page. |
1966 | 2292 if (block_address - free_start > 32 * kPointerSize) { |
1967 ASSERT(cells[polluted_cell_index] != 0); | 2293 free_start = DigestFreeStart(free_start, free_start_cell); |
1968 for (cell_index = NextCandidate(polluted_cell_index, last_cell_index, cells); | 2294 space->Free(free_start, block_address - free_start); |
| 2295 } |
| 2296 } |
| 2297 |
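The overall shape of the conservative scan can be modelled without any V8 types. Each 32-bit cell covers a 32-word block; a zero cell is certainly dead, and only free runs longer than roughly a block are worth handing to the free list. A toy version at whole-block granularity (hypothetical; it deliberately omits the within-cell refinement that DigestFreeStart and StartOfLiveObject perform):

#include <cstdint>
#include <cstdio>

int main() {
  // Invented mark cells for one page: zero means no live object starts here.
  const uint32_t cells[] = {0, 0, 0x41, 0, 0, 0, 0x100, 0};
  const int num_cells = sizeof(cells) / sizeof(cells[0]);
  int free_run_start = 0;  // Measured in 32-word blocks.
  for (int i = 0; i <= num_cells; i++) {
    bool live = (i < num_cells) && cells[i] != 0;
    if (live || i == num_cells) {
      if (i - free_run_start > 1) {  // Ignore holes of a single block.
        printf("free blocks [%d, %d)\n", free_run_start, i);
      }
      free_run_start = i + 1;
    }
  }
  return 0;
}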
| 2298 |
| 2299 // Sweep a space precisely. After this has been done the space can |
| 2300 // be iterated precisely, hitting only the live objects. Code space |
| 2301 // is always swept precisely because we want to be able to iterate |
| 2302 // over it. Map space is swept precisely, because it is not compacted. |
| 2303 static void SweepPrecisely(PagedSpace* space, |
| 2304 Page* p) { |
| 2305 Page::MarkbitsBitmap* markbits = p->markbits(); |
| 2306 Page::MarkbitsBitmap::CellType* cells = markbits->cells(); |
| 2307 |
| 2308 p->ClearFlag(MemoryChunk::WAS_SWEPT_CONSERVATIVELY); |
| 2309 |
| 2310 int last_cell_index = |
| 2311 Page::MarkbitsBitmap::IndexToCell( |
| 2312 Page::MarkbitsBitmap::CellAlignIndex( |
| 2313 p->AddressToMarkbitIndex(p->ObjectAreaEnd()))); |
| 2314 |
| 2315 int cell_index = Page::kFirstUsedCell; |
| 2316 Address free_start = p->ObjectAreaStart(); |
| 2317 ASSERT(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0); |
| 2318 Address object_address = p->ObjectAreaStart(); |
| 2319 int offsets[16]; |
| 2320 |
| 2321 for (cell_index = Page::kFirstUsedCell; |
1969 cell_index < last_cell_index; | 2322 cell_index < last_cell_index; |
1970 cell_index = NextCandidate(polluted_cell_index, | 2323 cell_index++, object_address += 32 * kPointerSize) { |
1971 last_cell_index, | 2324 ASSERT((unsigned)cell_index == |
1972 cells)) { | 2325 Page::MarkbitsBitmap::IndexToCell( |
1973 ASSERT(cells[cell_index] == 0); | 2326 Page::MarkbitsBitmap::CellAlignIndex( |
1974 | 2327 p->AddressToMarkbitIndex(object_address)))); |
1975 int size = SizeOfPreviousObject(p, cell_index, cells); | 2328 int live_objects = MarkWordToObjectStarts(cells[cell_index], offsets); |
1976 if (size <= 0) { | 2329 int live_index = 0; |
1977 polluted_cell_index = | 2330 for ( ; live_objects != 0; live_objects--) { |
1978 SweepFree(space, | 2331 Address free_end = object_address + offsets[live_index++] * kPointerSize; |
1979 p, | 2332 if (free_end != free_start) { |
1980 Page::MarkbitsBitmap::CellToIndex(cell_index), | 2333 space->Free(free_start, free_end - free_start); |
1981 last_cell_index, | |
1982 cells); | |
1983 if (polluted_cell_index >= last_cell_index) { | |
1984 // This free region is the last on the page. | |
1985 *last_free_start = p->MarkbitIndexToAddress( | |
1986 Page::MarkbitsBitmap::CellToIndex(cell_index)); | |
1987 return; | |
1988 } | 2334 } |
1989 } else { | 2335 HeapObject* live_object = HeapObject::FromAddress(free_end); |
1990 // Skip cells covered by this object. | 2336 free_start = free_end + live_object->Size(); |
1991 polluted_cell_index = cell_index + | |
1992 Page::MarkbitsBitmap::IndexToCell(size - 1); | |
1993 } | 2337 } |
1994 } | 2338 } |
1995 } | 2339 if (free_start != p->ObjectAreaEnd()) { |
1996 | 2340 space->Free(free_start, p->ObjectAreaEnd() - free_start); |
1997 | 2341 } |
1998 static void SweepPrecisely(PagedSpace* space, | |
1999 Page* p, | |
2000 Address* last_free_start) { | |
2001 bool is_previous_alive = true; | |
2002 Address free_start = NULL; | |
2003 HeapObject* object; | |
2004 | |
2005 for (Address current = p->ObjectAreaStart(); | |
2006 current < p->AllocationTop(); | |
2007 current += object->Size()) { | |
2008 object = HeapObject::FromAddress(current); | |
2009 if (Marking::IsMarked(object)) { | |
2010 Marking::ClearMark(object); | |
2011 MarkCompactCollector::tracer()->decrement_marked_count(); | |
2012 | |
2013 if (!is_previous_alive) { // Transition from free to live. | |
2014 space->DeallocateBlock(free_start, | |
2015 static_cast<int>(current - free_start), | |
2016 true); | |
2017 is_previous_alive = true; | |
2018 } | |
2019 } else { | |
2020 ASSERT((current + kPointerSize) >= p->AllocationTop() || | |
2021 object->Size() == kPointerSize || | |
2022 IncrementalMarking::IsWhite(object)); | |
2023 MarkCompactCollector::ReportDeleteIfNeeded(object); | |
2024 if (is_previous_alive) { // Transition from live to free. | |
2025 free_start = current; | |
2026 is_previous_alive = false; | |
2027 } | |
2028 } | |
2029 } | |
2030 | |
2031 if (!is_previous_alive) *last_free_start = free_start; | |
2032 } | 2342 } |
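The inner loop of the new SweepPrecisely frees the gap in front of each live object and then jumps past the object itself. The same bookkeeping in isolation, for a single 32-word block with invented offsets and sizes in word units:

#include <cstdio>

int main() {
  const int starts[] = {2, 10};  // Word offsets of live objects in the block.
  const int sizes[] = {4, 6};    // Their sizes, in words.
  const int block_words = 32;
  int free_start = 0;
  for (int i = 0; i < 2; i++) {
    if (starts[i] != free_start) {
      printf("free [%d, %d)\n", free_start, starts[i]);  // Gap before object.
    }
    free_start = starts[i] + sizes[i];
  }
  if (free_start != block_words) {
    printf("free [%d, %d)\n", free_start, block_words);  // Trailing free space.
  }
  return 0;
}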
2033 | 2343 |
2034 | 2344 |
2035 void MarkCompactCollector::SweepSpace(PagedSpace* space, | 2345 void MarkCompactCollector::SweepSpace(PagedSpace* space, |
2036 SweeperType sweeper) { | 2346 SweeperType sweeper) { |
2037 PageIterator it(space, PageIterator::PAGES_IN_USE); | 2347 space->set_was_swept_conservatively(sweeper == CONSERVATIVE); |
| 2348 |
| 2349 // We don't have a linear allocation area while sweeping. It will be restored |
| 2350 // on the first allocation after the sweep. |
| 2351 space->SetTop(NULL, NULL); |
| 2352 |
| 2353 space->ClearStats(); |
| 2354 |
| 2355 PageIterator it(space); |
2038 | 2356 |
2039 // During sweeping of paged space we are trying to find longest sequences | 2357 // During sweeping of paged space we are trying to find longest sequences |
2040 // of pages without live objects and free them (instead of putting them on | 2358 // of pages without live objects and free them (instead of putting them on |
2041 // the free list). | 2359 // the free list). |
2042 | 2360 |
2043 // Page preceding current. | 2361 // Page preceding current. |
2044 Page* prev = Page::FromAddress(NULL); | 2362 Page* prev = Page::FromAddress(NULL); |
2045 | 2363 |
2046 // First empty page in a sequence. | |
2047 Page* first_empty_page = Page::FromAddress(NULL); | |
2048 | |
2049 // Page preceding first empty page. | |
2050 Page* prec_first_empty_page = Page::FromAddress(NULL); | |
2051 | |
2052 // If last used page of space ends with a sequence of dead objects | |
2053 // we can adjust allocation top instead of putting this free area into | |
2054 // the free list. Thus during sweeping we keep track of such areas | |
2055 // and defer their deallocation until the sweeping of the next page | |
2056 // is done: if one of the next pages contains live objects we have | |
2057 // to put such area into the free list. | |
2058 Address last_free_start = NULL; | |
2059 int last_free_size = 0; | |
2060 | |
2061 while (it.has_next()) { | 2364 while (it.has_next()) { |
2062 Page* p = it.next(); | 2365 Page* p = it.next(); |
2063 | 2366 space->IncreaseCapacity(p->ObjectAreaEnd() - p->ObjectAreaStart()); |
2064 Address free_start = p->AllocationTop(); | |
2065 | 2367 |
2066 if (sweeper == CONSERVATIVE) { | 2368 if (sweeper == CONSERVATIVE) { |
2067 SweepConservatively(space, p, &free_start); | 2369 SweepConservatively(space, p); |
2068 p->set_linearity_boundary(free_start); | |
2069 } else { | 2370 } else { |
2070 ASSERT(sweeper == PRECISE); | 2371 ASSERT(sweeper == PRECISE); |
2071 SweepPrecisely(space, p, &free_start); | 2372 SweepPrecisely(space, p); |
2072 } | 2373 } |
2073 | |
2074 bool page_is_empty = (p->ObjectAreaStart() == free_start); | |
2075 bool is_previous_alive = (free_start == p->AllocationTop()); | |
2076 | |
2077 ASSERT(free_start <= p->AllocationTop()); | |
2078 | |
2079 if (page_is_empty) { | |
2080 // This page is empty. Check whether we are in the middle of | |
2081 // sequence of empty pages and start one if not. | |
2082 if (!first_empty_page->is_valid()) { | |
2083 first_empty_page = p; | |
2084 prec_first_empty_page = prev; | |
2085 } | |
2086 | |
2087 if (!is_previous_alive) { | |
2088 // There are dead objects on this page. Update space accounting stats | |
2089 // without putting anything into free list. | |
2090 int size_in_bytes = static_cast<int>(p->AllocationTop() - free_start); | |
2091 if (size_in_bytes > 0) { | |
2092 space->DeallocateBlock(free_start, size_in_bytes, false); | |
2093 } | |
2094 } | |
2095 } else { | |
2096 // This page is not empty. Sequence of empty pages ended on the previous | |
2097 // one. | |
2098 if (first_empty_page->is_valid()) { | |
2099 space->FreePages(prec_first_empty_page, prev); | |
2100 prec_first_empty_page = first_empty_page = Page::FromAddress(NULL); | |
2101 } | |
2102 | |
2103 // If there is a free ending area on one of the previous pages we have | |
2104 // to deallocate that area and put it on the free list. | |
2105 if (last_free_size > 0) { | |
2106 Page::FromAddress(last_free_start)-> | |
2107 SetAllocationWatermark(last_free_start); | |
2108 space->DeallocateBlock(last_free_start, last_free_size, true); | |
2109 last_free_start = NULL; | |
2110 last_free_size = 0; | |
2111 } | |
2112 | |
2113 // If the last region of this page was not live we remember it. | |
2114 if (!is_previous_alive) { | |
2115 ASSERT(last_free_size == 0); | |
2116 last_free_size = static_cast<int>(p->AllocationTop() - free_start); | |
2117 last_free_start = free_start; | |
2118 } | |
2119 } | |
2120 | |
2121 prev = p; | 2374 prev = p; |
2122 } | 2375 } |
2123 | 2376 // TODO(gc): set up allocation top and limit using the free list. |
2124 // We reached end of space. See if we need to adjust allocation top. | |
2125 Address new_allocation_top = NULL; | |
2126 | |
2127 if (first_empty_page->is_valid()) { | |
2128 // Last used pages in space are empty. We can move allocation top backwards | |
2129 // to the beginning of first empty page. | |
2130 ASSERT(prev == space->AllocationTopPage()); | |
2131 space->FreePages(prec_first_empty_page, prev); | |
2132 new_allocation_top = first_empty_page->ObjectAreaStart(); | |
2133 } | |
2134 | |
2135 if (last_free_size > 0) { | |
2136 // There was a free ending area on the previous page. | |
2137 // Deallocate it without putting it into freelist and move allocation | |
2138 // top to the beginning of this free area. | |
2139 space->DeallocateBlock(last_free_start, last_free_size, false); | |
2140 new_allocation_top = last_free_start; | |
2141 } | |
2142 | |
2143 if (new_allocation_top != NULL) { | |
2144 #ifdef DEBUG | |
2145 Page* new_allocation_top_page = Page::FromAllocationTop(new_allocation_top); | |
2146 if (!first_empty_page->is_valid()) { | |
2147 ASSERT(new_allocation_top_page == space->AllocationTopPage()); | |
2148 } else if (last_free_size > 0) { | |
2149 ASSERT(new_allocation_top_page == prec_first_empty_page); | |
2150 } else { | |
2151 ASSERT(new_allocation_top_page == first_empty_page); | |
2152 } | |
2153 #endif | |
2154 | |
2155 space->SetTop(new_allocation_top); | |
2156 } | |
2157 } | 2377 } |
2158 | 2378 |
2159 | 2379 |
2160 void MarkCompactCollector::SweepSpaces() { | 2380 void MarkCompactCollector::SweepSpaces() { |
2161 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP); | 2381 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP); |
2162 #ifdef DEBUG | 2382 #ifdef DEBUG |
2163 state_ = SWEEP_SPACES; | 2383 state_ = SWEEP_SPACES; |
2164 #endif | 2384 #endif |
2165 | 2385 |
2166 #ifndef DEBUG | |
2167 SweeperType fast_sweeper = CONSERVATIVE; | |
2168 #else | |
2169 SweeperType fast_sweeper = PRECISE; | |
2170 #endif | |
2171 | |
2172 ASSERT(!IsCompacting()); | 2386 ASSERT(!IsCompacting()); |
| 2387 SweeperType how_to_sweep = CONSERVATIVE; |
| 2388 if (sweep_precisely_) how_to_sweep = PRECISE; |
2173 // Noncompacting collections simply sweep the spaces to clear the mark | 2389 // Noncompacting collections simply sweep the spaces to clear the mark |
2174 // bits and free the nonlive blocks (for old and map spaces). We sweep | 2390 // bits and free the nonlive blocks (for old and map spaces). We sweep |
2175 // the map space last because freeing non-live maps overwrites them and | 2391 // the map space last because freeing non-live maps overwrites them and |
2176 // the other spaces rely on possibly non-live maps to get the sizes for | 2392 // the other spaces rely on possibly non-live maps to get the sizes for |
2177 // non-live objects. | 2393 // non-live objects. |
2178 SweepSpace(Heap::old_pointer_space(), fast_sweeper); | 2394 SweepSpace(Heap::old_pointer_space(), how_to_sweep); |
2179 SweepSpace(Heap::old_data_space(), fast_sweeper); | 2395 SweepSpace(Heap::old_data_space(), how_to_sweep); |
2180 SweepSpace(Heap::code_space(), PRECISE); | 2396 SweepSpace(Heap::code_space(), PRECISE); |
2181 // TODO(gc): implement specialized sweeper for cell space. | 2397 // TODO(gc): implement specialized sweeper for cell space. |
2182 SweepSpace(Heap::cell_space(), fast_sweeper); | 2398 SweepSpace(Heap::cell_space(), PRECISE); |
2183 { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE); | 2399 { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE); |
2184 SweepNewSpace(Heap::new_space()); | 2400 SweepNewSpace(Heap::new_space()); |
2185 } | 2401 } |
2186 // TODO(gc): ClearNonLiveTransitions depends on precise sweeping of | 2402 // TODO(gc): ClearNonLiveTransitions depends on precise sweeping of |
2187 // map space to detect whether unmarked map became dead in this | 2403 // map space to detect whether unmarked map became dead in this |
2188 // collection or in one of the previous ones. | 2404 // collection or in one of the previous ones. |
2189 // TODO(gc): Implement specialized sweeper for map space. | 2405 // TODO(gc): Implement specialized sweeper for map space. |
2190 SweepSpace(Heap::map_space(), PRECISE); | 2406 SweepSpace(Heap::map_space(), PRECISE); |
2191 | 2407 |
2192 ASSERT(live_map_objects_size_ <= Heap::map_space()->Size()); | 2408 ASSERT(live_map_objects_size_ <= Heap::map_space()->Size()); |
(...skipping 33 matching lines...) |
2226 int MarkCompactCollector::IterateLiveObjects(NewSpace* space, | 2442 int MarkCompactCollector::IterateLiveObjects(NewSpace* space, |
2227 HeapObjectCallback size_f) { | 2443 HeapObjectCallback size_f) { |
2228 ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS); | 2444 ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS); |
2229 return IterateLiveObjectsInRange(space->bottom(), space->top(), size_f); | 2445 return IterateLiveObjectsInRange(space->bottom(), space->top(), size_f); |
2230 } | 2446 } |
2231 | 2447 |
2232 | 2448 |
2233 int MarkCompactCollector::IterateLiveObjects(PagedSpace* space, | 2449 int MarkCompactCollector::IterateLiveObjects(PagedSpace* space, |
2234 HeapObjectCallback size_f) { | 2450 HeapObjectCallback size_f) { |
2235 ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS); | 2451 ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS); |
| 2452 // TODO(gc): Do a mark-sweep first with precise sweeping. |
2236 int total = 0; | 2453 int total = 0; |
2237 PageIterator it(space, PageIterator::PAGES_IN_USE); | 2454 PageIterator it(space); |
2238 while (it.has_next()) { | 2455 while (it.has_next()) { |
2239 Page* p = it.next(); | 2456 Page* p = it.next(); |
2240 total += IterateLiveObjectsInRange(p->ObjectAreaStart(), | 2457 total += IterateLiveObjectsInRange(p->ObjectAreaStart(), |
2241 p->AllocationTop(), | 2458 p->ObjectAreaEnd(), |
2242 size_f); | 2459 size_f); |
2243 } | 2460 } |
2244 return total; | 2461 return total; |
2245 } | 2462 } |
2246 | 2463 |
2247 | 2464 |
2248 void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj) { | 2465 void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj) { |
2249 #ifdef ENABLE_GDB_JIT_INTERFACE | 2466 #ifdef ENABLE_GDB_JIT_INTERFACE |
2250 if (obj->IsCode()) { | 2467 if (obj->IsCode()) { |
2251 GDBJITInterface::RemoveCode(reinterpret_cast<Code*>(obj)); | 2468 GDBJITInterface::RemoveCode(reinterpret_cast<Code*>(obj)); |
2252 } | 2469 } |
2253 #endif | 2470 #endif |
2254 #ifdef ENABLE_LOGGING_AND_PROFILING | 2471 #ifdef ENABLE_LOGGING_AND_PROFILING |
2255 if (obj->IsCode()) { | 2472 if (obj->IsCode()) { |
2256 PROFILE(CodeDeleteEvent(obj->address())); | 2473 PROFILE(CodeDeleteEvent(obj->address())); |
2257 } | 2474 } |
2258 #endif | 2475 #endif |
2259 } | 2476 } |
2260 | 2477 |
2261 | 2478 |
2262 void MarkCompactCollector::Initialize() { | 2479 void MarkCompactCollector::Initialize() { |
2263 StaticPointersToNewGenUpdatingVisitor::Initialize(); | 2480 StaticPointersToNewGenUpdatingVisitor::Initialize(); |
2264 StaticMarkingVisitor::Initialize(); | 2481 StaticMarkingVisitor::Initialize(); |
2265 } | 2482 } |
2266 | 2483 |
2267 | 2484 |
2268 } } // namespace v8::internal | 2485 } } // namespace v8::internal |