Chromium Code Reviews

Side by Side Diff: src/mark-compact.cc

Issue 6639024: Get rid of distinction between below- and above-watermark in page allocation.... (Closed) Base URL: http://v8.googlecode.com/svn/branches/experimental/gc/
Patch Set: Created 9 years, 9 months ago
1 // Copyright 2006-2008 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution. 11 // with the distribution.
(...skipping 26 matching lines...)
38 #include "objects-visiting.h" 38 #include "objects-visiting.h"
39 #include "stub-cache.h" 39 #include "stub-cache.h"
40 40
41 namespace v8 { 41 namespace v8 {
42 namespace internal { 42 namespace internal {
43 43
44 // ------------------------------------------------------------------------- 44 // -------------------------------------------------------------------------
45 // MarkCompactCollector 45 // MarkCompactCollector
46 46
47 bool MarkCompactCollector::force_compaction_ = false; 47 bool MarkCompactCollector::force_compaction_ = false;
48 bool MarkCompactCollector::sweep_precisely_ = false;
48 bool MarkCompactCollector::compacting_collection_ = false; 49 bool MarkCompactCollector::compacting_collection_ = false;
49 bool MarkCompactCollector::compact_on_next_gc_ = false; 50 bool MarkCompactCollector::compact_on_next_gc_ = false;
50 51
51 GCTracer* MarkCompactCollector::tracer_ = NULL; 52 GCTracer* MarkCompactCollector::tracer_ = NULL;
52 53
53 #ifdef DEBUG 54 #ifdef DEBUG
54 MarkCompactCollector::CollectorState MarkCompactCollector::state_ = IDLE; 55 MarkCompactCollector::CollectorState MarkCompactCollector::state_ = IDLE;
55 56
56 // Counters used for debugging the marking phase of mark-compact or mark-sweep 57 // Counters used for debugging the marking phase of mark-compact or mark-sweep
57 // collection. 58 // collection.
(...skipping 126 matching lines...)
184 // Check that swept all marked objects and 185 // Check that swept all marked objects and
185 // null out the GC tracer. 186 // null out the GC tracer.
186 // TODO(gc) does not work with conservative sweeping. 187 // TODO(gc) does not work with conservative sweeping.
187 // ASSERT(tracer_->marked_count() == 0); 188 // ASSERT(tracer_->marked_count() == 0);
188 tracer_ = NULL; 189 tracer_ = NULL;
189 } 190 }
190 191
191 192
192 #ifdef DEBUG 193 #ifdef DEBUG
193 static void VerifyMarkbitsAreClean(PagedSpace* space) { 194 static void VerifyMarkbitsAreClean(PagedSpace* space) {
194 PageIterator it(space, PageIterator::PAGES_IN_USE); 195 PageIterator it(space);
195 196
196 while (it.has_next()) { 197 while (it.has_next()) {
197 Page* p = it.next(); 198 Page* p = it.next();
198 ASSERT(p->markbits()->IsClean()); 199 ASSERT(p->markbits()->IsClean());
199 } 200 }
200 } 201 }
201 202
202 static void VerifyMarkbitsAreClean() { 203 static void VerifyMarkbitsAreClean() {
203 VerifyMarkbitsAreClean(Heap::old_pointer_space()); 204 VerifyMarkbitsAreClean(Heap::old_pointer_space());
204 VerifyMarkbitsAreClean(Heap::old_data_space()); 205 VerifyMarkbitsAreClean(Heap::old_data_space());
205 VerifyMarkbitsAreClean(Heap::code_space()); 206 VerifyMarkbitsAreClean(Heap::code_space());
206 VerifyMarkbitsAreClean(Heap::cell_space()); 207 VerifyMarkbitsAreClean(Heap::cell_space());
207 VerifyMarkbitsAreClean(Heap::map_space()); 208 VerifyMarkbitsAreClean(Heap::map_space());
208 } 209 }
209 #endif 210 #endif
210 211
211 212
212 static void ClearMarkbits(PagedSpace* space) { 213 static void ClearMarkbits(PagedSpace* space) {
213 PageIterator it(space, PageIterator::PAGES_IN_USE); 214 PageIterator it(space);
214 215
215 while (it.has_next()) { 216 while (it.has_next()) {
216 Page* p = it.next(); 217 Page* p = it.next();
217 p->markbits()->Clear(); 218 p->markbits()->Clear();
218 } 219 }
219 } 220 }
220 221
222
221 static void ClearMarkbits() { 223 static void ClearMarkbits() {
222 // We are sweeping code and map spaces precisely so clearing is not required. 224 // TODO(gc): Clean the mark bits while sweeping.
225 ClearMarkbits(Heap::code_space());
226 ClearMarkbits(Heap::map_space());
223 ClearMarkbits(Heap::old_pointer_space()); 227 ClearMarkbits(Heap::old_pointer_space());
224 ClearMarkbits(Heap::old_data_space()); 228 ClearMarkbits(Heap::old_data_space());
225 ClearMarkbits(Heap::cell_space()); 229 ClearMarkbits(Heap::cell_space());
226 } 230 }
227 231
228 232
229 void Marking::TransferMark(Address old_start, Address new_start) { 233 void Marking::TransferMark(Address old_start, Address new_start) {
230 if (IncrementalMarking::state() == IncrementalMarking::MARKING) { 234 if (IncrementalMarking::state() == IncrementalMarking::MARKING) {
231 if (IncrementalMarking::IsBlack(HeapObject::FromAddress(old_start))) { 235 if (IncrementalMarking::IsBlack(HeapObject::FromAddress(old_start))) {
232 IncrementalMarking::MarkBlack(HeapObject::FromAddress(new_start)); 236 IncrementalMarking::MarkBlack(HeapObject::FromAddress(new_start));
233 } else if (IncrementalMarking::IsGrey(HeapObject::FromAddress(old_start))) { 237 } else if (IncrementalMarking::IsGrey(HeapObject::FromAddress(old_start))) {
234 IncrementalMarking::WhiteToGrey(HeapObject::FromAddress(new_start)); 238 IncrementalMarking::WhiteToGrey(HeapObject::FromAddress(new_start));
235 // TODO(gc): if we shift huge array in the loop we might end up pushing 239 // TODO(gc): if we shift huge array in the loop we might end up pushing
236 // to much to marking stack. maybe we should check one or two elements 240 // to much to marking stack. maybe we should check one or two elements
237 // on top of it to see whether they are equal to old_start. 241 // on top of it to see whether they are equal to old_start.
238 } 242 }
239 } else { 243 } else {
240 if (Heap::InNewSpace(old_start) || 244 if (Heap::InNewSpace(old_start) || !IsMarked(old_start)) {
241 Page::FromAddress(old_start)->IsFlagSet(Page::IS_CONTINUOUS) ||
242 !IsMarked(old_start)) {
243 return; 245 return;
244 } 246 }
245 SetMark(new_start); 247 SetMark(new_start);
246 } 248 }
247 } 249 }
248 250
249 251
250 void MarkCompactCollector::Prepare(GCTracer* tracer) { 252 void MarkCompactCollector::Prepare(GCTracer* tracer) {
251 FLAG_flush_code = false; 253 FLAG_flush_code = false;
252 FLAG_always_compact = false; 254 FLAG_always_compact = false;
(...skipping 82 matching lines...)
335 // We compact the old generation on the next GC if it has gotten too 337 // We compact the old generation on the next GC if it has gotten too
336 // fragmented (ie, we could recover an expected amount of space by 338 // fragmented (ie, we could recover an expected amount of space by
337 // reclaiming the waste and free list blocks). 339 // reclaiming the waste and free list blocks).
338 static const int kFragmentationLimit = 15; // Percent. 340 static const int kFragmentationLimit = 15; // Percent.
339 static const int kFragmentationAllowed = 1 * MB; // Absolute. 341 static const int kFragmentationAllowed = 1 * MB; // Absolute.
340 intptr_t old_gen_recoverable = 0; 342 intptr_t old_gen_recoverable = 0;
341 intptr_t old_gen_used = 0; 343 intptr_t old_gen_used = 0;
342 344
343 OldSpaces spaces; 345 OldSpaces spaces;
344 for (OldSpace* space = spaces.next(); space != NULL; space = spaces.next()) { 346 for (OldSpace* space = spaces.next(); space != NULL; space = spaces.next()) {
345 old_gen_recoverable += space->Waste() + space->AvailableFree(); 347 old_gen_recoverable += space->Waste() + space->Available();
346 old_gen_used += space->Size(); 348 old_gen_used += space->Size();
347 } 349 }
348 350
349 int old_gen_fragmentation = 351 int old_gen_fragmentation =
350 static_cast<int>((old_gen_recoverable * 100.0) / old_gen_used); 352 static_cast<int>((old_gen_recoverable * 100.0) / old_gen_used);
351 if (old_gen_fragmentation > kFragmentationLimit && 353 if (old_gen_fragmentation > kFragmentationLimit &&
352 old_gen_recoverable > kFragmentationAllowed) { 354 old_gen_recoverable > kFragmentationAllowed) {
353 compact_on_next_gc_ = true; 355 compact_on_next_gc_ = true;
354 } 356 }
355 } 357 }
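A worked example of this heuristic, using hypothetical numbers rather than anything from the patch:

#include <cstdint>
#include <cstdio>

int main() {
  const intptr_t MB = 1024 * 1024;
  // Hypothetical figures, purely to illustrate the thresholds above.
  intptr_t old_gen_used = 20 * MB;         // 20 MB of old space in use
  intptr_t old_gen_recoverable = 4 * MB;   // 4 MB of waste plus free list blocks
  int old_gen_fragmentation =
      static_cast<int>((old_gen_recoverable * 100.0) / old_gen_used);
  printf("%d\n", old_gen_fragmentation);   // prints 20
  // 20 > kFragmentationLimit (15) and 4 MB > kFragmentationAllowed (1 MB),
  // so compact_on_next_gc_ would be set to true on this path.
  return 0;
}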
(...skipping 880 matching lines...)
1236 } 1238 }
1237 } 1239 }
1238 // The DescriptorArray descriptors contains a pointer to its contents array, 1240 // The DescriptorArray descriptors contains a pointer to its contents array,
1239 // but the contents array is already marked. 1241 // but the contents array is already marked.
1240 marking_stack.Push(descriptors); 1242 marking_stack.Push(descriptors);
1241 } 1243 }
1242 1244
1243 1245
1244 void MarkCompactCollector::CreateBackPointers() { 1246 void MarkCompactCollector::CreateBackPointers() {
1245 HeapObjectIterator iterator(Heap::map_space()); 1247 HeapObjectIterator iterator(Heap::map_space());
1246 for (HeapObject* next_object = iterator.next(); 1248 for (HeapObject* next_object = iterator.Next();
1247 next_object != NULL; next_object = iterator.next()) { 1249 next_object != NULL; next_object = iterator.Next()) {
1248 if (next_object->IsMap()) { // Could also be ByteArray on free list. 1250 if (next_object->IsMap()) { // Could also be ByteArray on free list.
1249 Map* map = Map::cast(next_object); 1251 Map* map = Map::cast(next_object);
1250 if (map->instance_type() >= FIRST_JS_OBJECT_TYPE && 1252 if (map->instance_type() >= FIRST_JS_OBJECT_TYPE &&
1251 map->instance_type() <= JS_FUNCTION_TYPE) { 1253 map->instance_type() <= JS_FUNCTION_TYPE) {
1252 map->CreateBackPointers(); 1254 map->CreateBackPointers();
1253 } else { 1255 } else {
1254 ASSERT(map->instance_descriptors() == Heap::empty_descriptor_array()); 1256 ASSERT(map->instance_descriptors() == Heap::empty_descriptor_array());
1255 } 1257 }
1256 } 1258 }
1257 } 1259 }
(...skipping 299 matching lines...)
1557 // Iterate over the map space, setting map transitions that go from 1559 // Iterate over the map space, setting map transitions that go from
1558 // a marked map to an unmarked map to null transitions. At the same time, 1560 // a marked map to an unmarked map to null transitions. At the same time,
1559 // set all the prototype fields of maps back to their original value, 1561 // set all the prototype fields of maps back to their original value,
1560 // dropping the back pointers temporarily stored in the prototype field. 1562 // dropping the back pointers temporarily stored in the prototype field.
1561 // Setting the prototype field requires following the linked list of 1563 // Setting the prototype field requires following the linked list of
1562 // back pointers, reversing them all at once. This allows us to find 1564 // back pointers, reversing them all at once. This allows us to find
1563 // those maps with map transitions that need to be nulled, and only 1565 // those maps with map transitions that need to be nulled, and only
1564 // scan the descriptor arrays of those maps, not all maps. 1566 // scan the descriptor arrays of those maps, not all maps.
1565 // All of these actions are carried out only on maps of JSObjects 1567 // All of these actions are carried out only on maps of JSObjects
1566 // and related subtypes. 1568 // and related subtypes.
1567 for (HeapObject* obj = map_iterator.next(); 1569 for (HeapObject* obj = map_iterator.Next();
1568 obj != NULL; obj = map_iterator.next()) { 1570 obj != NULL; obj = map_iterator.Next()) {
1569 Map* map = reinterpret_cast<Map*>(obj); 1571 Map* map = reinterpret_cast<Map*>(obj);
1570 if (!Marking::IsMarked(map) && map->IsByteArray()) continue; 1572 if (!Marking::IsMarked(map) && map->IsByteArray()) continue;
1571 1573
1572 ASSERT(SafeIsMap(map)); 1574 ASSERT(SafeIsMap(map));
1573 // Only JSObject and subtypes have map transitions and back pointers. 1575 // Only JSObject and subtypes have map transitions and back pointers.
1574 if (map->instance_type() < FIRST_JS_OBJECT_TYPE) continue; 1576 if (map->instance_type() < FIRST_JS_OBJECT_TYPE) continue;
1575 if (map->instance_type() > JS_FUNCTION_TYPE) continue; 1577 if (map->instance_type() > JS_FUNCTION_TYPE) continue;
1576 1578
1577 if (Marking::IsMarked(map) && 1579 if (Marking::IsMarked(map) &&
1578 map->attached_to_shared_function_info()) { 1580 map->attached_to_shared_function_info()) {
(...skipping 244 matching lines...)
1823 Heap::IterateRoots(&updating_visitor, VISIT_ALL_IN_SCAVENGE); 1825 Heap::IterateRoots(&updating_visitor, VISIT_ALL_IN_SCAVENGE);
1824 LiveObjectList::IterateElements(&updating_visitor); 1826 LiveObjectList::IterateElements(&updating_visitor);
1825 1827
1826 { 1828 {
1827 StoreBufferRebuildScope scope; 1829 StoreBufferRebuildScope scope;
1828 StoreBuffer::IteratePointersToNewSpace(&UpdatePointerToNewGen); 1830 StoreBuffer::IteratePointersToNewSpace(&UpdatePointerToNewGen);
1829 } 1831 }
1830 1832
1831 // Update pointers from cells. 1833 // Update pointers from cells.
1832 HeapObjectIterator cell_iterator(Heap::cell_space()); 1834 HeapObjectIterator cell_iterator(Heap::cell_space());
1833 for (HeapObject* cell = cell_iterator.next(); 1835 for (HeapObject* cell = cell_iterator.Next();
1834 cell != NULL; 1836 cell != NULL;
1835 cell = cell_iterator.next()) { 1837 cell = cell_iterator.Next()) {
1836 if (cell->IsJSGlobalPropertyCell()) { 1838 if (cell->IsJSGlobalPropertyCell()) {
1837 Address value_address = 1839 Address value_address =
1838 reinterpret_cast<Address>(cell) + 1840 reinterpret_cast<Address>(cell) +
1839 (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag); 1841 (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
1840 updating_visitor.VisitPointer(reinterpret_cast<Object**>(value_address)); 1842 updating_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1841 } 1843 }
1842 } 1844 }
1843 1845
1844 // Update pointer from the global contexts list. 1846 // Update pointer from the global contexts list.
1845 updating_visitor.VisitPointer(Heap::global_contexts_list_address()); 1847 updating_visitor.VisitPointer(Heap::global_contexts_list_address());
(...skipping 24 matching lines...)
1870 ASSERT(cells[free_cell_index] == 0); 1872 ASSERT(cells[free_cell_index] == 0);
1871 while (free_cell_index < region_end && cells[free_cell_index] == 0) { 1873 while (free_cell_index < region_end && cells[free_cell_index] == 0) {
1872 free_cell_index++; 1874 free_cell_index++;
1873 } 1875 }
1874 1876
1875 if (free_cell_index >= region_end) { 1877 if (free_cell_index >= region_end) {
1876 return free_cell_index; 1878 return free_cell_index;
1877 } 1879 }
1878 1880
1879 uint32_t free_end = Page::MarkbitsBitmap::CellToIndex(free_cell_index); 1881 uint32_t free_end = Page::MarkbitsBitmap::CellToIndex(free_cell_index);
1880 space->DeallocateBlock(p->MarkbitIndexToAddress(free_start), 1882 space->Free(p->MarkbitIndexToAddress(free_start),
1881 (free_end - free_start) << kPointerSizeLog2, 1883 (free_end - free_start) << kPointerSizeLog2);
1882 true);
1883 1884
1884 return free_cell_index; 1885 return free_cell_index;
1885 } 1886 }
1886 1887
1887 1888
1888 INLINE(static uint32_t NextCandidate(uint32_t cell_index, 1889 INLINE(static uint32_t NextCandidate(uint32_t cell_index,
1889 uint32_t last_cell_index, 1890 uint32_t last_cell_index,
1890 uint32_t* cells)); 1891 uint32_t* cells));
1891 1892
1892 1893
(...skipping 22 matching lines...)
1915 CompilerIntrinsics::CountLeadingZeros(cells[cell_index - 1]) + 1; 1916 CompilerIntrinsics::CountLeadingZeros(cells[cell_index - 1]) + 1;
1916 Address addr = 1917 Address addr =
1917 p->MarkbitIndexToAddress( 1918 p->MarkbitIndexToAddress(
1918 Page::MarkbitsBitmap::CellToIndex(cell_index) - leading_zeroes); 1919 Page::MarkbitsBitmap::CellToIndex(cell_index) - leading_zeroes);
1919 HeapObject* obj = HeapObject::FromAddress(addr); 1920 HeapObject* obj = HeapObject::FromAddress(addr);
1920 ASSERT(obj->map()->IsMap()); 1921 ASSERT(obj->map()->IsMap());
1921 return (obj->Size() >> kPointerSizeLog2) - leading_zeroes; 1922 return (obj->Size() >> kPointerSizeLog2) - leading_zeroes;
1922 } 1923 }
1923 1924
1924 1925
1926 static const int kStartTableStride = 5;
1927 static const int kStartTableSize = 171;
1928 static const int kStartTableInvalidEntry = 127;
1929 static const int kStartTableUnusedEntry = 126;
1930
1931 #define _ kStartTableUnusedEntry
1932 #define X kStartTableInvalidEntry
1933 // Mark-bit to object start offset. Since objects are at least 2 words
1934 // large we don't have entries for two consecutive 1 bits. All entries
1935 // after 170 have at least 2 consecutive bits.
1936 char kStartTable[kStartTableSize * kStartTableStride] = {
Vyacheslav Egorov (Chromium) 2011/03/15 09:20:09 Stride and Size are really mysterious names --- mo
Erik Corry 2011/03/17 13:39:17 Done.
1937 0, _, _, _, _, // 0
1938 1, 0, _, _, _, // 1
1939 1, 1, _, _, _, // 2
1940 X, _, _, _, _, // 3
1941 1, 2, _, _, _, // 4
1942 2, 0, 2, _, _, // 5
1943 X, _, _, _, _, // 6
1944 X, _, _, _, _, // 7
1945 1, 3, _, _, _, // 8
1946 2, 0, 3, _, _, // 9
1947 2, 1, 3, _, _, // 10
1948 X, _, _, _, _, // 11
1949 X, _, _, _, _, // 12
1950 X, _, _, _, _, // 13
1951 X, _, _, _, _, // 14
1952 X, _, _, _, _, // 15
1953 1, 4, _, _, _, // 16
1954 2, 0, 4, _, _, // 17
1955 2, 1, 4, _, _, // 18
1956 X, _, _, _, _, // 19
1957 2, 2, 4, _, _, // 20
1958 3, 0, 2, 4, _, // 21
1959 X, _, _, _, _, // 22
1960 X, _, _, _, _, // 23
1961 X, _, _, _, _, // 24
1962 X, _, _, _, _, // 25
1963 X, _, _, _, _, // 26
1964 X, _, _, _, _, // 27
1965 X, _, _, _, _, // 28
1966 X, _, _, _, _, // 29
1967 X, _, _, _, _, // 30
1968 X, _, _, _, _, // 31
1969 1, 5, _, _, _, // 32
1970 2, 0, 5, _, _, // 33
1971 2, 1, 5, _, _, // 34
1972 X, _, _, _, _, // 35
1973 2, 2, 5, _, _, // 36
1974 3, 0, 2, 5, _, // 37
1975 X, _, _, _, _, // 38
1976 X, _, _, _, _, // 39
1977 2, 3, 5, _, _, // 40
1978 3, 0, 3, 5, _, // 41
1979 3, 1, 3, 5, _, // 42
1980 X, _, _, _, _, // 43
1981 X, _, _, _, _, // 44
1982 X, _, _, _, _, // 45
1983 X, _, _, _, _, // 46
1984 X, _, _, _, _, // 47
1985 X, _, _, _, _, // 48
1986 X, _, _, _, _, // 49
1987 X, _, _, _, _, // 50
1988 X, _, _, _, _, // 51
1989 X, _, _, _, _, // 52
1990 X, _, _, _, _, // 53
1991 X, _, _, _, _, // 54
1992 X, _, _, _, _, // 55
1993 X, _, _, _, _, // 56
1994 X, _, _, _, _, // 57
1995 X, _, _, _, _, // 58
1996 X, _, _, _, _, // 59
1997 X, _, _, _, _, // 60
1998 X, _, _, _, _, // 61
1999 X, _, _, _, _, // 62
2000 X, _, _, _, _, // 63
2001 1, 6, _, _, _, // 64
2002 2, 0, 6, _, _, // 65
2003 2, 1, 6, _, _, // 66
2004 X, _, _, _, _, // 67
2005 2, 2, 6, _, _, // 68
2006 3, 0, 2, 6, _, // 69
2007 X, _, _, _, _, // 70
2008 X, _, _, _, _, // 71
2009 2, 3, 6, _, _, // 72
2010 3, 0, 3, 6, _, // 73
2011 3, 1, 3, 6, _, // 74
2012 X, _, _, _, _, // 75
2013 X, _, _, _, _, // 76
2014 X, _, _, _, _, // 77
2015 X, _, _, _, _, // 78
2016 X, _, _, _, _, // 79
2017 2, 4, 6, _, _, // 80
2018 3, 0, 4, 6, _, // 81
2019 3, 1, 4, 6, _, // 82
2020 X, _, _, _, _, // 83
2021 3, 2, 4, 6, _, // 84
2022 4, 0, 2, 4, 6, // 85
2023 X, _, _, _, _, // 86
2024 X, _, _, _, _, // 87
2025 X, _, _, _, _, // 88
2026 X, _, _, _, _, // 89
2027 X, _, _, _, _, // 90
2028 X, _, _, _, _, // 91
2029 X, _, _, _, _, // 92
2030 X, _, _, _, _, // 93
2031 X, _, _, _, _, // 94
2032 X, _, _, _, _, // 95
2033 X, _, _, _, _, // 96
2034 X, _, _, _, _, // 97
2035 X, _, _, _, _, // 98
2036 X, _, _, _, _, // 99
2037 X, _, _, _, _, // 100
2038 X, _, _, _, _, // 101
2039 X, _, _, _, _, // 102
2040 X, _, _, _, _, // 103
2041 X, _, _, _, _, // 104
2042 X, _, _, _, _, // 105
2043 X, _, _, _, _, // 106
2044 X, _, _, _, _, // 107
2045 X, _, _, _, _, // 108
2046 X, _, _, _, _, // 109
2047 X, _, _, _, _, // 110
2048 X, _, _, _, _, // 111
2049 X, _, _, _, _, // 112
2050 X, _, _, _, _, // 113
2051 X, _, _, _, _, // 114
2052 X, _, _, _, _, // 115
2053 X, _, _, _, _, // 116
2054 X, _, _, _, _, // 117
2055 X, _, _, _, _, // 118
2056 X, _, _, _, _, // 119
2057 X, _, _, _, _, // 120
2058 X, _, _, _, _, // 121
2059 X, _, _, _, _, // 122
2060 X, _, _, _, _, // 123
2061 X, _, _, _, _, // 124
2062 X, _, _, _, _, // 125
2063 X, _, _, _, _, // 126
2064 X, _, _, _, _, // 127
2065 1, 7, _, _, _, // 128
2066 2, 0, 7, _, _, // 129
2067 2, 1, 7, _, _, // 130
2068 X, _, _, _, _, // 131
2069 2, 2, 7, _, _, // 132
2070 3, 0, 2, 7, _, // 133
2071 X, _, _, _, _, // 134
2072 X, _, _, _, _, // 135
2073 2, 3, 7, _, _, // 136
2074 3, 0, 3, 7, _, // 137
2075 3, 1, 3, 7, _, // 138
2076 X, _, _, _, _, // 139
2077 X, _, _, _, _, // 140
2078 X, _, _, _, _, // 141
2079 X, _, _, _, _, // 142
2080 X, _, _, _, _, // 143
2081 2, 4, 7, _, _, // 144
2082 3, 0, 4, 7, _, // 145
2083 3, 1, 4, 7, _, // 146
2084 X, _, _, _, _, // 147
2085 3, 2, 4, 7, _, // 148
2086 4, 0, 2, 4, 7, // 149
2087 X, _, _, _, _, // 150
2088 X, _, _, _, _, // 151
2089 X, _, _, _, _, // 152
2090 X, _, _, _, _, // 153
2091 X, _, _, _, _, // 154
2092 X, _, _, _, _, // 155
2093 X, _, _, _, _, // 156
2094 X, _, _, _, _, // 157
2095 X, _, _, _, _, // 158
2096 X, _, _, _, _, // 159
2097 2, 5, 7, _, _, // 160
2098 3, 0, 5, 7, _, // 161
2099 3, 1, 5, 7, _, // 162
2100 X, _, _, _, _, // 163
2101 3, 2, 5, 7, _, // 164
2102 4, 0, 2, 5, 7, // 165
2103 X, _, _, _, _, // 166
2104 X, _, _, _, _, // 167
2105 3, 3, 5, 7, _, // 168
2106 4, 0, 3, 5, 7, // 169
2107 4, 1, 3, 5, 7 // 170
2108 };
2109 #undef _
2110 #undef X
2111
2112
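To read the table: row N describes a mark byte with value N; the first entry is the number of objects that start in those eight words, and the remaining entries are their bit offsets. For example, row 145 (mark byte 0b10010001) is {3, 0, 4, 7}: three objects starting at offsets 0, 4 and 7. Rows such as 3 (0b00000011) are marked invalid because two consecutive mark bits would imply a one-word object, and objects are at least two words long.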
2113 // Takes a word of mark bits. Returns the number of objects that start in the
2114 // range. Puts the offsets of the words in the supplied array.
2115 static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts) {
2116 int objects = 0;
2117 int offset = 0;
2118
2119 // No consecutive 1 bits.
2120 ASSERT((mark_bits & 0x180) != 0x180);
2121 ASSERT((mark_bits & 0x18000) != 0x18000);
2122 ASSERT((mark_bits & 0x1800000) != 0x1800000);
2123
2124 while (mark_bits != 0) {
2125 int byte = (mark_bits & 0xff);
2126 mark_bits >>= 8;
2127 if (byte != 0) {
2128 ASSERT(byte < kStartTableSize); // No consecutive 1 bits.
2129 char* table = kStartTable + byte * kStartTableStride;
2130 int objects_in_these_8_words = table[0];
2131 ASSERT(objects_in_these_8_words != kStartTableInvalidEntry);
2132 ASSERT(objects_in_these_8_words < kStartTableStride);
2133 for (int i = 0; i < objects_in_these_8_words; i++) {
2134 starts[objects++] = offset + table[1 + i];
2135 }
2136 }
2137 offset += 8;
2138 }
2139 return objects;
2140 }
2141
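A small usage sketch (not part of the patch), assuming the table and the function above are in scope:

int offsets[16];
// Mark word 0x91 has bits 0, 4 and 7 set; the table row for byte 0x91
// (decimal 145) is {3, 0, 4, 7}.
int count = MarkWordToObjectStarts(0x91u, offsets);
// count == 3; offsets[0..2] == {0, 4, 7}.
// Set bits in higher bytes are reported relative to their byte, e.g.
// MarkWordToObjectStarts(0x9100u, offsets) yields offsets 8, 12 and 15.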
2142
2143 static inline Address DigestFreeStart(Address approximate_free_start,
2144 uint32_t free_start_cell) {
2145 ASSERT(free_start_cell != 0);
2146
2147 int offsets[16];
2148 uint32_t cell = free_start_cell;
2149 int offset_of_last_live;
2150 if ((cell & 0x80000000u) != 0) {
2151 // This case would overflow below.
2152 offset_of_last_live = 31;
2153 } else {
2154 // Remove all but one bit, the most significant. This is an optimization
2155 // that may or may not be worthwhile.
2156 cell |= cell >> 16;
2157 cell |= cell >> 8;
2158 cell |= cell >> 4;
2159 cell |= cell >> 2;
2160 cell |= cell >> 1;
2161 cell = (cell + 1) >> 1;
2162 int live_objects = MarkWordToObjectStarts(cell, offsets);
2163 ASSERT(live_objects == 1);
2164 offset_of_last_live = offsets[live_objects - 1];
2165 }
2166 Address last_live_start =
2167 approximate_free_start + offset_of_last_live * kPointerSize;
2168 HeapObject* last_live = HeapObject::FromAddress(last_live_start);
2169 Address free_start = last_live_start + last_live->Size();
2170 return free_start;
2171 }
2172
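A worked example of the bit smearing above, with an arbitrary cell value (the top-bit case is handled separately by the function):

uint32_t cell = 0x00012300u;   // most significant set bit is bit 16
cell |= cell >> 16;
cell |= cell >> 8;
cell |= cell >> 4;
cell |= cell >> 2;
cell |= cell >> 1;             // every bit below the MSB is now set: 0x0001ffff
cell = (cell + 1) >> 1;        // 0x00010000: only the original MSB remains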
2173
2174 static inline Address StartOfLiveObject(Address block_address, uint32_t cell) {
2175 ASSERT(cell != 0);
2176
2177 int offsets[16];
2178 if (cell == 0x80000000u) { // Avoid overflow below.
2179 return block_address + 31 * kPointerSize;
2180 }
2181 uint32_t first_set_bit = ((cell ^ (cell - 1)) + 1) >> 1;
2182 ASSERT((first_set_bit & cell) == first_set_bit);
2183 int live_objects = MarkWordToObjectStarts(first_set_bit, offsets);
2184 ASSERT(live_objects == 1);
2185 USE(live_objects);
2186 return block_address + offsets[0] * kPointerSize;
2187 }
2188
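The expression ((cell ^ (cell - 1)) + 1) >> 1 used above isolates the lowest set bit; a worked example:

uint32_t cell = 0x00012300u;                // lowest set bit is bit 8
uint32_t below = cell ^ (cell - 1);         // 0x000001ff: lowest set bit and everything below it
uint32_t first_set_bit = (below + 1) >> 1;  // 0x00000100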
2189
2190 // Sweeps a space conservatively. After this has been done the larger free
2191 // spaces have been put on the free list and the smaller ones have been
2192 // ignored and left untouched. A free space is always either ignored or put
2193 // on the free list, never split up into two parts. This is important
2194 // because it means that any ByteArray maps left actually describe a region of
2195 // memory that can be ignored when scanning. Dead objects other than byte
2196 // arrays will not contain the byte array map.
1925 static void SweepConservatively(PagedSpace* space, 2197 static void SweepConservatively(PagedSpace* space,
1926 Page* p, 2198 Page* p) {
1927 Address* last_free_start) {
1928 typedef Page::MarkbitsBitmap::CellType CellType;
1929 Page::MarkbitsBitmap* markbits = p->markbits(); 2199 Page::MarkbitsBitmap* markbits = p->markbits();
1930 CellType* cells = markbits->cells(); 2200 Page::MarkbitsBitmap::CellType* cells = markbits->cells();
1931 2201
1932 uint32_t last_cell_index = 2202 p->SetFlag(MemoryChunk::WAS_SWEPT_CONSERVATIVELY);
2203
2204 // This is the start of the 32 word block that we are currently looking at.
2205 Address block_address = p->ObjectAreaStart();
2206
2207 int last_cell_index =
1933 Page::MarkbitsBitmap::IndexToCell( 2208 Page::MarkbitsBitmap::IndexToCell(
1934 Page::MarkbitsBitmap::CellAlignIndex( 2209 Page::MarkbitsBitmap::CellAlignIndex(
1935 p->AddressToMarkbitIndex(p->AllocationTop()))); 2210 p->AddressToMarkbitIndex(p->ObjectAreaEnd())));
1936 2211
1937 uint32_t cell_index = Page::kFirstUsedCell; 2212 int cell_index = Page::kFirstUsedCell;
1938 uint32_t polluted_cell_index = Page::kFirstUsedCell; 2213
1939 if (cells[cell_index] == 0) { 2214 // Skip over all the dead objects at the start of the page and mark them free.
1940 polluted_cell_index = 2215 for (cell_index = Page::kFirstUsedCell;
1941 SweepFree(space, 2216 cell_index < last_cell_index;
1942 p, 2217 cell_index++, block_address += 32 * kPointerSize) {
1943 p->AddressToMarkbitIndex(p->ObjectAreaStart()), 2218 if (cells[cell_index] != 0) break;
1944 last_cell_index, 2219 }
1945 cells); 2220 int size = block_address - p->ObjectAreaStart();
1946 2221 if (cell_index == last_cell_index) {
1947 if (polluted_cell_index >= last_cell_index) { 2222 space->Free(p->ObjectAreaStart(), size);
1948 // All cells are free. 2223 return;
1949 *last_free_start = p->ObjectAreaStart(); 2224 }
1950 return; 2225 // Grow the size of the start-of-page free space a little to get up to the
2226 // first live object.
2227 Address free_end = StartOfLiveObject(block_address, cells[cell_index]);
2228 // Free the first free space.
2229 size = free_end - p->ObjectAreaStart();
2230 space->Free(p->ObjectAreaStart(), size);
2231 // The start of the current free area is represented in undigested form by
2232 // the address of the last 32-word section that contained a live object and
2233 // the marking bitmap for that cell, which describes where the live object
2234 // started. Unless we find a large free space in the bitmap we will not
2235 // digest this pair into a real address. We start the iteration here at the
2236 // first word in the marking bit map that indicates a live object.
2237 Address free_start = block_address;
2238 uint32_t free_start_cell = cells[cell_index];
2239
2240 for ( ;
2241 cell_index < last_cell_index;
2242 cell_index++, block_address += 32 * kPointerSize) {
2243 ASSERT((unsigned)cell_index ==
2244 Page::MarkbitsBitmap::IndexToCell(
2245 Page::MarkbitsBitmap::CellAlignIndex(
2246 p->AddressToMarkbitIndex(block_address))));
2247 uint32_t cell = cells[cell_index];
2248 if (cell != 0) {
2249 // We have a live object. Check approximately whether it is more than 32
2250 // words since the last live object.
2251 if (block_address - free_start > 32 * kPointerSize) {
2252 free_start = DigestFreeStart(free_start, free_start_cell);
2253 if (block_address - free_start > 32 * kPointerSize) {
2254 // Now that we know the exact start of the free space it still looks
2255 // like we have a large enough free space to be worth bothering with.
2256 // so now we need to find the start of the first live object at the
2257 // end of the free space.
2258 free_end = StartOfLiveObject(block_address, cell);
2259 space->Free(free_start, free_end - free_start);
2260 }
2261 }
2262 // Update our undigested record of where the current free area started.
2263 free_start = block_address;
2264 free_start_cell = cell;
1951 } 2265 }
1952 } 2266 }
1953 2267
1954 p->ClearFlag(Page::IS_CONTINUOUS); 2268 // Handle the free space at the end of the page.
1955 2269 if (block_address - free_start > 32 * kPointerSize) {
1956 ASSERT(cells[polluted_cell_index] != 0); 2270 free_start = DigestFreeStart(free_start, free_start_cell);
1957 for (cell_index = NextCandidate(polluted_cell_index, last_cell_index, cells); 2271 space->Free(free_start, block_address - free_start);
2272 }
2273 }
2274
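A minimal, self-contained sketch of the same scanning pattern, using plain arrays instead of V8's Page and bitmap types. All names here are illustrative, and the run starts it reports are approximate in the same way free_start is before DigestFreeStart:

#include <cstdint>
#include <cstdio>

// Each cell covers 32 words; a set bit means an object starts at that word.
// Only free runs longer than 32 words are reported, mirroring the threshold
// used above.
static void ToyConservativeSweep(const uint32_t* cells, int cell_count) {
  int free_start = 0;  // word index where the current free run began
  for (int i = 0; i < cell_count; i++) {
    if (cells[i] == 0) continue;
    int block_start = i * 32;
    if (block_start - free_start > 32) {
      printf("free run: words [%d, %d)\n", free_start, block_start);
    }
    free_start = block_start;  // a live object starts here; restart the run
  }
  if (cell_count * 32 - free_start > 32) {
    printf("free run at end: words [%d, %d)\n", free_start, cell_count * 32);
  }
}

int main() {
  const uint32_t cells[] = {0x1u, 0, 0, 0x80000000u, 0, 0};
  ToyConservativeSweep(cells, 6);  // prints [0, 96) and [96, 192); starts are undigested
  return 0;
}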
2275
2276 // Sweep a space precisely. After this has been done the space can
2277 // be iterated precisely, hitting only the live objects. Code space
2278 // is always swept precisely because we want to be able to iterate
2279 // over it. Map space is swept precisely, because it is not compacted.
2280 static void SweepPrecisely(PagedSpace* space,
2281 Page* p) {
2282 Page::MarkbitsBitmap* markbits = p->markbits();
2283 Page::MarkbitsBitmap::CellType* cells = markbits->cells();
2284
2285 p->ClearFlag(MemoryChunk::WAS_SWEPT_CONSERVATIVELY);
2286
2287 int last_cell_index =
2288 Page::MarkbitsBitmap::IndexToCell(
2289 Page::MarkbitsBitmap::CellAlignIndex(
2290 p->AddressToMarkbitIndex(p->ObjectAreaEnd())));
2291
2292 int cell_index = Page::kFirstUsedCell;
2293 Address free_start = p->ObjectAreaStart();
2294 ASSERT(reinterpret_cast<uint32_t>(free_start) % (32 * kPointerSize) == 0);
2295 Address object_address = p->ObjectAreaStart();
2296 int offsets[16];
2297
2298 for (cell_index = Page::kFirstUsedCell;
1958 cell_index < last_cell_index; 2299 cell_index < last_cell_index;
1959 cell_index = NextCandidate(polluted_cell_index, 2300 cell_index++, object_address += 32 * kPointerSize) {
1960 last_cell_index, 2301 ASSERT((unsigned)cell_index ==
1961 cells)) { 2302 Page::MarkbitsBitmap::IndexToCell(
1962 ASSERT(cells[cell_index] == 0); 2303 Page::MarkbitsBitmap::CellAlignIndex(
1963 2304 p->AddressToMarkbitIndex(object_address))));
1964 int size = SizeOfPreviousObject(p, cell_index, cells); 2305 int live_objects = MarkWordToObjectStarts(cells[cell_index], offsets);
1965 if (size <= 0) { 2306 int live_index = 0;
1966 polluted_cell_index = 2307 for ( ; live_objects != 0; live_objects--) {
1967 SweepFree(space, 2308 Address free_end = object_address + offsets[live_index++] * kPointerSize;
1968 p, 2309 if (free_end != free_start) {
1969 Page::MarkbitsBitmap::CellToIndex(cell_index), 2310 space->Free(free_start, free_end - free_start);
1970 last_cell_index,
1971 cells);
1972 if (polluted_cell_index >= last_cell_index) {
1973 // This free region is the last on the page.
1974 *last_free_start = p->MarkbitIndexToAddress(
1975 Page::MarkbitsBitmap::CellToIndex(cell_index));
1976 return;
1977 } 2311 }
1978 } else { 2312 HeapObject* live_object = HeapObject::FromAddress(free_end);
1979 // Skip cells covered by this object. 2313 free_start = free_end + live_object->Size();
1980 polluted_cell_index = cell_index +
1981 Page::MarkbitsBitmap::IndexToCell(size - 1);
1982 } 2314 }
1983 } 2315 }
1984 } 2316 if (free_start != p->ObjectAreaEnd()) {
1985 2317 space->Free(free_start, p->ObjectAreaEnd() - free_start);
1986 2318 }
1987 static void SweepPrecisely(PagedSpace* space,
1988 Page* p,
1989 Address* last_free_start) {
1990 bool is_previous_alive = true;
1991 Address free_start = NULL;
1992 HeapObject* object;
1993
1994 for (Address current = p->ObjectAreaStart();
1995 current < p->AllocationTop();
1996 current += object->Size()) {
1997 object = HeapObject::FromAddress(current);
1998 if (Marking::IsMarked(object)) {
1999 Marking::ClearMark(object);
2000 MarkCompactCollector::tracer()->decrement_marked_count();
2001
2002 if (!is_previous_alive) { // Transition from free to live.
2003 space->DeallocateBlock(free_start,
2004 static_cast<int>(current - free_start),
2005 true);
2006 is_previous_alive = true;
2007 }
2008 } else {
2009 ASSERT((current + kPointerSize) >= p->AllocationTop() ||
2010 object->Size() == kPointerSize ||
2011 IncrementalMarking::IsWhite(object));
2012 MarkCompactCollector::ReportDeleteIfNeeded(object);
2013 if (is_previous_alive) { // Transition from live to free.
2014 free_start = current;
2015 is_previous_alive = false;
2016 }
2017 }
2018 }
2019
2020 if (!is_previous_alive) *last_free_start = free_start;
2021 } 2319 }
2022 2320
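A toy version of the per-cell step above, using word offsets instead of addresses. The start offsets could come from MarkWordToObjectStarts and the sizes stand in for HeapObject::Size; both arrays here are made up:

#include <cstdio>

// Free the gap in front of every live object and the tail after the last one.
static void ToyPreciseSweep(const int* starts, const int* sizes, int n) {
  int free_start = 0;  // word offset where the current free gap begins
  for (int i = 0; i < n; i++) {
    if (starts[i] != free_start) {
      printf("free gap: words [%d, %d)\n", free_start, starts[i]);
    }
    free_start = starts[i] + sizes[i];
  }
  if (free_start != 32) {
    printf("free tail: words [%d, 32)\n", free_start);
  }
}

int main() {
  const int starts[] = {0, 4, 7};   // e.g. mark word 0x91
  const int sizes[]  = {2, 3, 10};  // hypothetical object sizes in words
  ToyPreciseSweep(starts, sizes, 3);  // prints [2, 4) and [17, 32)
  return 0;
}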
2023 2321
2024 void MarkCompactCollector::SweepSpace(PagedSpace* space, 2322 void MarkCompactCollector::SweepSpace(PagedSpace* space,
2025 SweeperType sweeper) { 2323 SweeperType sweeper) {
2026 PageIterator it(space, PageIterator::PAGES_IN_USE); 2324 space->set_was_swept_conservatively(sweeper == CONSERVATIVE);
2325
2326 // We don't have a linear allocation area while sweeping. It will be restored
2327 // on the first allocation after the sweep.
2328 space->SetTop(NULL, NULL);
2329
2330 space->ClearStats();
2331
2332 PageIterator it(space);
2027 2333
2028 // During sweeping of paged space we are trying to find longest sequences 2334 // During sweeping of paged space we are trying to find longest sequences
2029 // of pages without live objects and free them (instead of putting them on 2335 // of pages without live objects and free them (instead of putting them on
2030 // the free list). 2336 // the free list).
2031 2337
2032 // Page preceding current. 2338 // Page preceding current.
2033 Page* prev = Page::FromAddress(NULL); 2339 Page* prev = Page::FromAddress(NULL);
2034 2340
2035 // First empty page in a sequence.
2036 Page* first_empty_page = Page::FromAddress(NULL);
2037
2038 // Page preceding first empty page.
2039 Page* prec_first_empty_page = Page::FromAddress(NULL);
2040
2041 // If last used page of space ends with a sequence of dead objects
2042 // we can adjust allocation top instead of puting this free area into
2043 // the free list. Thus during sweeping we keep track of such areas
2044 // and defer their deallocation until the sweeping of the next page
2045 // is done: if one of the next pages contains live objects we have
2046 // to put such area into the free list.
2047 Address last_free_start = NULL;
2048 int last_free_size = 0;
2049
2050 while (it.has_next()) { 2341 while (it.has_next()) {
2051 Page* p = it.next(); 2342 Page* p = it.next();
2052 2343 space->IncreaseCapacity(p->ObjectAreaEnd() - p->ObjectAreaStart());
2053 Address free_start = p->AllocationTop();
2054 2344
2055 if (sweeper == CONSERVATIVE) { 2345 if (sweeper == CONSERVATIVE) {
2056 SweepConservatively(space, p, &free_start); 2346 SweepConservatively(space, p);
2057 p->set_linearity_boundary(free_start);
2058 } else { 2347 } else {
2059 ASSERT(sweeper == PRECISE); 2348 ASSERT(sweeper == PRECISE);
2060 SweepPrecisely(space, p, &free_start); 2349 SweepPrecisely(space, p);
2061 } 2350 }
2062
2063 bool page_is_empty = (p->ObjectAreaStart() == free_start);
2064 bool is_previous_alive = (free_start == p->AllocationTop());
2065
2066 ASSERT(free_start <= p->AllocationTop());
2067
2068 if (page_is_empty) {
2069 // This page is empty. Check whether we are in the middle of
2070 // sequence of empty pages and start one if not.
2071 if (!first_empty_page->is_valid()) {
2072 first_empty_page = p;
2073 prec_first_empty_page = prev;
2074 }
2075
2076 if (!is_previous_alive) {
2077 // There are dead objects on this page. Update space accounting stats
2078 // without putting anything into free list.
2079 int size_in_bytes = static_cast<int>(p->AllocationTop() - free_start);
2080 if (size_in_bytes > 0) {
2081 space->DeallocateBlock(free_start, size_in_bytes, false);
2082 }
2083 }
2084 } else {
2085 // This page is not empty. Sequence of empty pages ended on the previous
2086 // one.
2087 if (first_empty_page->is_valid()) {
2088 space->FreePages(prec_first_empty_page, prev);
2089 prec_first_empty_page = first_empty_page = Page::FromAddress(NULL);
2090 }
2091
2092 // If there is a free ending area on one of the previous pages we have
2093 // deallocate that area and put it on the free list.
2094 if (last_free_size > 0) {
2095 Page::FromAddress(last_free_start)->
2096 SetAllocationWatermark(last_free_start);
2097 space->DeallocateBlock(last_free_start, last_free_size, true);
2098 last_free_start = NULL;
2099 last_free_size = 0;
2100 }
2101
2102 // If the last region of this page was not live we remember it.
2103 if (!is_previous_alive) {
2104 ASSERT(last_free_size == 0);
2105 last_free_size = static_cast<int>(p->AllocationTop() - free_start);
2106 last_free_start = free_start;
2107 }
2108 }
2109
2110 prev = p; 2351 prev = p;
2111 } 2352 }
2112 2353 // TODO(gc): set up allocation top and limit using the free list.
2113 // We reached end of space. See if we need to adjust allocation top.
2114 Address new_allocation_top = NULL;
2115
2116 if (first_empty_page->is_valid()) {
2117 // Last used pages in space are empty. We can move allocation top backwards
2118 // to the beginning of first empty page.
2119 ASSERT(prev == space->AllocationTopPage());
2120 space->FreePages(prec_first_empty_page, prev);
2121 new_allocation_top = first_empty_page->ObjectAreaStart();
2122 }
2123
2124 if (last_free_size > 0) {
2125 // There was a free ending area on the previous page.
2126 // Deallocate it without putting it into freelist and move allocation
2127 // top to the beginning of this free area.
2128 space->DeallocateBlock(last_free_start, last_free_size, false);
2129 new_allocation_top = last_free_start;
2130 }
2131
2132 if (new_allocation_top != NULL) {
2133 #ifdef DEBUG
2134 Page* new_allocation_top_page = Page::FromAllocationTop(new_allocation_top);
2135 if (!first_empty_page->is_valid()) {
2136 ASSERT(new_allocation_top_page == space->AllocationTopPage());
2137 } else if (last_free_size > 0) {
2138 ASSERT(new_allocation_top_page == prec_first_empty_page);
2139 } else {
2140 ASSERT(new_allocation_top_page == first_empty_page);
2141 }
2142 #endif
2143
2144 space->SetTop(new_allocation_top);
2145 }
2146 } 2354 }
2147 2355
2148 2356
2149 void MarkCompactCollector::SweepSpaces() { 2357 void MarkCompactCollector::SweepSpaces() {
2150 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP); 2358 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP);
2151 #ifdef DEBUG 2359 #ifdef DEBUG
2152 state_ = SWEEP_SPACES; 2360 state_ = SWEEP_SPACES;
2153 #endif 2361 #endif
2154 2362
2155 #ifndef DEBUG
2156 SweeperType fast_sweeper = CONSERVATIVE;
2157 #else
2158 SweeperType fast_sweeper = PRECISE;
2159 #endif
2160
2161 ASSERT(!IsCompacting()); 2363 ASSERT(!IsCompacting());
2364 SweeperType how_to_sweep = CONSERVATIVE;
2365 if (sweep_precisely_) how_to_sweep = PRECISE;
2162 // Noncompacting collections simply sweep the spaces to clear the mark 2366 // Noncompacting collections simply sweep the spaces to clear the mark
2163 // bits and free the nonlive blocks (for old and map spaces). We sweep 2367 // bits and free the nonlive blocks (for old and map spaces). We sweep
2164 // the map space last because freeing non-live maps overwrites them and 2368 // the map space last because freeing non-live maps overwrites them and
2165 // the other spaces rely on possibly non-live maps to get the sizes for 2369 // the other spaces rely on possibly non-live maps to get the sizes for
2166 // non-live objects. 2370 // non-live objects.
2167 SweepSpace(Heap::old_pointer_space(), fast_sweeper); 2371 SweepSpace(Heap::old_pointer_space(), how_to_sweep);
2168 SweepSpace(Heap::old_data_space(), fast_sweeper); 2372 SweepSpace(Heap::old_data_space(), how_to_sweep);
2169 SweepSpace(Heap::code_space(), PRECISE); 2373 SweepSpace(Heap::code_space(), PRECISE);
2170 // TODO(gc): implement specialized sweeper for cell space. 2374 // TODO(gc): implement specialized sweeper for cell space.
2171 SweepSpace(Heap::cell_space(), fast_sweeper); 2375 SweepSpace(Heap::cell_space(), PRECISE);
2172 { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE); 2376 { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
2173 SweepNewSpace(Heap::new_space()); 2377 SweepNewSpace(Heap::new_space());
2174 } 2378 }
2175 // TODO(gc): ClearNonLiveTransitions depends on precise sweeping of 2379 // TODO(gc): ClearNonLiveTransitions depends on precise sweeping of
2176 // map space to detect whether unmarked map became dead in this 2380 // map space to detect whether unmarked map became dead in this
2177 // collection or in one of the previous ones. 2381 // collection or in one of the previous ones.
2178 // TODO(gc): Implement specialized sweeper for map space. 2382 // TODO(gc): Implement specialized sweeper for map space.
2179 SweepSpace(Heap::map_space(), PRECISE); 2383 SweepSpace(Heap::map_space(), PRECISE);
2180 2384
2181 ASSERT(live_map_objects_size_ <= Heap::map_space()->Size()); 2385 ASSERT(live_map_objects_size_ <= Heap::map_space()->Size());
(...skipping 33 matching lines...)
2215 int MarkCompactCollector::IterateLiveObjects(NewSpace* space, 2419 int MarkCompactCollector::IterateLiveObjects(NewSpace* space,
2216 HeapObjectCallback size_f) { 2420 HeapObjectCallback size_f) {
2217 ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS); 2421 ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS);
2218 return IterateLiveObjectsInRange(space->bottom(), space->top(), size_f); 2422 return IterateLiveObjectsInRange(space->bottom(), space->top(), size_f);
2219 } 2423 }
2220 2424
2221 2425
2222 int MarkCompactCollector::IterateLiveObjects(PagedSpace* space, 2426 int MarkCompactCollector::IterateLiveObjects(PagedSpace* space,
2223 HeapObjectCallback size_f) { 2427 HeapObjectCallback size_f) {
2224 ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS); 2428 ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS);
2429 // TODO(gc): Do a mark-sweep first with precise sweeping.
2225 int total = 0; 2430 int total = 0;
2226 PageIterator it(space, PageIterator::PAGES_IN_USE); 2431 PageIterator it(space);
2227 while (it.has_next()) { 2432 while (it.has_next()) {
2228 Page* p = it.next(); 2433 Page* p = it.next();
2229 total += IterateLiveObjectsInRange(p->ObjectAreaStart(), 2434 total += IterateLiveObjectsInRange(p->ObjectAreaStart(),
2230 p->AllocationTop(), 2435 p->ObjectAreaEnd(),
2231 size_f); 2436 size_f);
2232 } 2437 }
2233 return total; 2438 return total;
2234 } 2439 }
2235 2440
2236 2441
2237 void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj) { 2442 void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj) {
2238 #ifdef ENABLE_GDB_JIT_INTERFACE 2443 #ifdef ENABLE_GDB_JIT_INTERFACE
2239 if (obj->IsCode()) { 2444 if (obj->IsCode()) {
2240 GDBJITInterface::RemoveCode(reinterpret_cast<Code*>(obj)); 2445 GDBJITInterface::RemoveCode(reinterpret_cast<Code*>(obj));
(...skipping 13 matching lines...)
2254 } 2459 }
2255 2460
2256 2461
2257 void MarkCompactCollector::Initialize() { 2462 void MarkCompactCollector::Initialize() {
2258 StaticPointersToNewGenUpdatingVisitor::Initialize(); 2463 StaticPointersToNewGenUpdatingVisitor::Initialize();
2259 StaticMarkingVisitor::Initialize(); 2464 StaticMarkingVisitor::Initialize();
2260 } 2465 }
2261 2466
2262 2467
2263 } } // namespace v8::internal 2468 } } // namespace v8::internal