Chromium Code Reviews

Unified diff: src/heap/mark-compact.cc

Issue 770453003: Use just one marking deque. (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Created 6 years ago
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "src/v8.h"

 #include "src/base/atomicops.h"
 #include "src/base/bits.h"
 #include "src/code-stubs.h"
 #include "src/compilation-cache.h"
(...skipping 32 matching lines...)
       reduce_memory_footprint_(false),
       abort_incremental_marking_(false),
       marking_parity_(ODD_MARKING_PARITY),
       compacting_(false),
       was_marked_incrementally_(false),
       sweeping_in_progress_(false),
       pending_sweeper_jobs_semaphore_(0),
       evacuation_(false),
       migration_slots_buffer_(NULL),
       heap_(heap),
+      marking_deque_memory_(NULL),
+      marking_deque_memory_committed_(false),
       code_flusher_(NULL),
       have_code_to_deoptimize_(false) {
 }

 #ifdef VERIFY_HEAP
 class VerifyMarkingVisitor : public ObjectVisitor {
  public:
   explicit VerifyMarkingVisitor(Heap* heap) : heap_(heap) {}

   void VisitPointers(Object** start, Object** end) {
(...skipping 163 matching lines...)
 }
 #endif  // VERIFY_HEAP


 void MarkCompactCollector::SetUp() {
   free_list_old_data_space_.Reset(new FreeList(heap_->old_data_space()));
   free_list_old_pointer_space_.Reset(new FreeList(heap_->old_pointer_space()));
 }


-void MarkCompactCollector::TearDown() { AbortCompaction(); }
+void MarkCompactCollector::TearDown() {
+  AbortCompaction();
+  delete marking_deque_memory_;
+}


 void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
   p->MarkEvacuationCandidate();
   evacuation_candidates_.Add(p);
 }


 static void TraceFragmentation(PagedSpace* space) {
   int number_of_pages = space->CountTotalPages();
   intptr_t reserved = (number_of_pages * space->AreaSize());
   intptr_t free = reserved - space->SizeOfObjects();
   PrintF("[%s]: %d pages, %d (%.1f%%) free\n",
          AllocationSpaceName(space->identity()), number_of_pages,
          static_cast<int>(free), static_cast<double>(free) * 100 / reserved);
 }

-bool MarkCompactCollector::StartCompaction(CompactionMode mode) {
+bool MarkCompactCollector::StartCompaction(MarkingMode mode) {
   if (!compacting_) {
     DCHECK(evacuation_candidates_.length() == 0);

 #ifdef ENABLE_GDB_JIT_INTERFACE
     // If GDBJIT interface is active disable compaction.
     if (FLAG_gdbjit) return false;
 #endif

     CollectEvacuationCandidates(heap()->old_pointer_space());
     CollectEvacuationCandidates(heap()->old_data_space());

-    if (FLAG_compact_code_space && (mode == NON_INCREMENTAL_COMPACTION ||
-                                    FLAG_incremental_code_compaction)) {
+    if (FLAG_compact_code_space &&
+        (mode == NON_INCREMENTAL || FLAG_incremental_code_compaction)) {
       CollectEvacuationCandidates(heap()->code_space());
     } else if (FLAG_trace_fragmentation) {
       TraceFragmentation(heap()->code_space());
     }

     if (FLAG_trace_fragmentation) {
       TraceFragmentation(heap()->map_space());
       TraceFragmentation(heap()->cell_space());
       TraceFragmentation(heap()->property_cell_space());
     }
(...skipping 549 matching lines...)
     ClearMarkbits();
     AbortWeakCollections();
     AbortWeakCells();
     AbortCompaction();
     was_marked_incrementally_ = false;
   }

   // Don't start compaction if we are in the middle of an incremental
   // marking cycle. We did not collect any slots.
   if (!FLAG_never_compact && !was_marked_incrementally_) {
-    StartCompaction(NON_INCREMENTAL_COMPACTION);
+    StartCompaction(NON_INCREMENTAL);
   }

   PagedSpaces spaces(heap());
   for (PagedSpace* space = spaces.next(); space != NULL;
        space = spaces.next()) {
     space->PrepareForMarkCompact();
   }

 #ifdef VERIFY_HEAP
   if (!was_marked_incrementally_ && FLAG_verify_heap) {
(...skipping 824 matching lines...)
     // Mark the object.
     collector_->SetMark(object, mark_bit);

     // Mark the map pointer and body, and push them on the marking stack.
     MarkBit map_mark = Marking::MarkBitFrom(map);
     collector_->MarkObject(map, map_mark);
     MarkCompactMarkingVisitor::IterateBody(map, object);

     // Mark all the objects reachable from the map and body. May leave
     // overflowed objects in the heap.
-    collector_->EmptyMarkingDeque();
+    collector_->EmptyMarkingDeque<MarkCompactCollector::NON_INCREMENTAL>();
   }

   MarkCompactCollector* collector_;
 };


 // Helper class for pruning the string table.
 template <bool finalize_external_strings>
 class StringTableCleaner : public ObjectVisitor {
  public:
(...skipping 256 matching lines...)
   heap()->IterateStrongRoots(visitor, VISIT_ONLY_STRONG);

   // Handle the string table specially.
   MarkStringTable(visitor);

   MarkWeakObjectToCodeTable();

   // There may be overflowed objects in the heap. Visit them now.
   while (marking_deque_.overflowed()) {
     RefillMarkingDeque();
-    EmptyMarkingDeque();
+    EmptyMarkingDeque<MarkCompactCollector::NON_INCREMENTAL>();
   }
 }


 void MarkCompactCollector::MarkImplicitRefGroups() {
   List<ImplicitRefGroup*>* ref_groups =
       isolate()->global_handles()->implicit_ref_groups();

   int last = 0;
   for (int i = 0; i < ref_groups->length(); i++) {
(...skipping 30 matching lines...)
     MarkBit mark = Marking::MarkBitFrom(weak_object_to_code_table);
     SetMark(weak_object_to_code_table, mark);
   }
 }


 // Mark all objects reachable from the objects on the marking stack.
 // Before: the marking stack contains zero or more heap object pointers.
 // After: the marking stack is empty, and all objects reachable from the
 // marking stack have been marked, or are overflowed in the heap.
+template void MarkCompactCollector::EmptyMarkingDeque<
+    MarkCompactCollector::INCREMENTAL>();
+template void MarkCompactCollector::EmptyMarkingDeque<
+    MarkCompactCollector::NON_INCREMENTAL>();
+
+template <MarkCompactCollector::MarkingMode mode>
 void MarkCompactCollector::EmptyMarkingDeque() {
+  Map* filler_map = heap_->one_pointer_filler_map();
   while (!marking_deque_.IsEmpty()) {
     HeapObject* object = marking_deque_.Pop();
+    // Explicitly skip one word fillers. Incremental markbit patterns are
+    // correct only for objects that occupy at least two words.
+    Map* map = object->map();
+    if (map == filler_map) continue;
+
     DCHECK(object->IsHeapObject());
     DCHECK(heap()->Contains(object));
-    DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
+    DCHECK(!Marking::IsWhite(Marking::MarkBitFrom(object)));
jochen (gone - plz use gerrit) 2014/12/01 14:32:04 why was this inverted?
Hannes Payer (out of office) 2014/12/01 14:53:23 Because it can be grey or black.
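A note on the relaxed DCHECK discussed above: with a single deque now shared between the incremental and non-incremental markers, an object popped here may be grey (marked and queued, but not yet scanned) or black (fully scanned); only white (unmarked) would indicate a bug. A minimal standalone sketch of the tri-color states, with illustrative names rather than V8's actual mark-bit encoding:

```cpp
#include <cassert>

// Illustrative tri-color states; names and encoding are hypothetical,
// not V8's actual MarkBit layout.
enum class Color { kWhite, kGrey, kBlack };  // unmarked / queued / scanned

bool IsWhite(Color c) { return c == Color::kWhite; }
bool IsBlack(Color c) { return c == Color::kBlack; }

int main() {
  // The incremental marker pushes objects grey; the atomic pause pops and
  // scans them. A popped object is therefore grey or black, never white,
  // which is why DCHECK(IsBlack(...)) became DCHECK(!IsWhite(...)).
  Color popped = Color::kGrey;
  assert(!IsWhite(popped));  // holds for grey and black alike
  return 0;
}
```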

-    Map* map = object->map();
     MarkBit map_mark = Marking::MarkBitFrom(map);
     MarkObject(map, map_mark);

-    MarkCompactMarkingVisitor::IterateBody(map, object);
+    if (mode == NON_INCREMENTAL) {
+      MarkCompactMarkingVisitor::IterateBody(map, object);
+    } else {
+      heap_->incremental_marking()->VisitObject(map, object,
+                                                object->SizeFromMap(map));
+    }
   }
 }
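EmptyMarkingDeque is now a member function template parameterized on MarkingMode, with its definition kept in this .cc file; the explicit instantiations placed above the definition emit object code for both modes so other translation units can call them. A minimal sketch of that pattern, with hypothetical names:

```cpp
// Hypothetical stand-in for the explicit-instantiation pattern above.
struct Collector {
  enum MarkingMode { INCREMENTAL, NON_INCREMENTAL };
  // Declared in the header; defined and instantiated in one .cc file.
  template <MarkingMode mode>
  int Drain();
};

template <Collector::MarkingMode mode>
int Collector::Drain() {
  // The mode check compiles to a constant branch per instantiation,
  // mirroring the NON_INCREMENTAL test in the patch.
  return mode == NON_INCREMENTAL ? 0 : 1;
}

// Explicit instantiations: without these, callers in other translation
// units would fail to link, since the template body is not visible to them.
template int Collector::Drain<Collector::INCREMENTAL>();
template int Collector::Drain<Collector::NON_INCREMENTAL>();
```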


 // Sweep the heap for overflowed objects, clear their overflow bits, and
 // push them on the marking stack. Stop early if the marking stack fills
 // before sweeping completes. If sweeping completes, there are no remaining
 // overflowed objects in the heap so the overflow flag on the marking stack
 // is cleared.
 void MarkCompactCollector::RefillMarkingDeque() {
(...skipping 28 matching lines...)

   marking_deque_.ClearOverflowed();
 }


 // Mark all objects reachable (transitively) from objects on the marking
 // stack. Before: the marking stack contains zero or more heap object
 // pointers. After: the marking stack is empty and there are no overflowed
 // objects in the heap.
 void MarkCompactCollector::ProcessMarkingDeque() {
-  EmptyMarkingDeque();
+  EmptyMarkingDeque<MarkCompactCollector::NON_INCREMENTAL>();
   while (marking_deque_.overflowed()) {
     RefillMarkingDeque();
-    EmptyMarkingDeque();
+    EmptyMarkingDeque<MarkCompactCollector::NON_INCREMENTAL>();
   }
 }
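The drain/refill loop above exists because the deque is a fixed-size buffer: a push that would exceed capacity instead leaves the object marked in the heap and sets an overflow flag, and RefillMarkingDeque rediscovers those objects by scanning the heap. A rough sketch of that overflow protocol over a hypothetical bounded worklist (not V8's MarkingDeque API):

```cpp
#include <cstddef>
#include <vector>

// Hypothetical bounded worklist illustrating the overflow protocol.
class BoundedWorklist {
 public:
  explicit BoundedWorklist(size_t capacity) : capacity_(capacity) {}

  // On overflow the object simply stays marked in the heap; a later
  // refill pass (RefillMarkingDeque in the patch) finds it again.
  void Push(int object) {
    if (items_.size() == capacity_) {
      overflowed_ = true;
      return;
    }
    items_.push_back(object);
  }

  bool IsEmpty() const { return items_.empty(); }
  int Pop() { int v = items_.back(); items_.pop_back(); return v; }
  bool overflowed() const { return overflowed_; }
  void ClearOverflowed() { overflowed_ = false; }

 private:
  std::vector<int> items_;
  size_t capacity_;
  bool overflowed_ = false;
};
```

Termination is guaranteed because marking is monotonic: once a full drain completes without dropping any push, the overflow flag stays clear and the loop exits.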


 // Mark all objects reachable (transitively) from objects on the marking
 // stack including references only considered in the atomic marking pause.
 void MarkCompactCollector::ProcessEphemeralMarking(ObjectVisitor* visitor) {
   bool work_to_do = true;
   DCHECK(marking_deque_.IsEmpty());
   while (work_to_do) {
(...skipping 18 matching lines...)
       if (!code->CanDeoptAt(it.frame()->pc())) {
         code->CodeIterateBody(visitor);
       }
       ProcessMarkingDeque();
       return;
     }
   }
 }


+void MarkCompactCollector::EnsureMarkingDequeIsCommittedAndInitialize() {
+  if (marking_deque_memory_ == NULL) {
+    marking_deque_memory_ = new base::VirtualMemory(4 * MB);
+  }
+  if (!marking_deque_memory_committed_) {
+    bool success = marking_deque_memory_->Commit(
+        reinterpret_cast<Address>(marking_deque_memory_->address()),
+        marking_deque_memory_->size(),
+        false);  // Not executable.
+    CHECK(success);
+    marking_deque_memory_committed_ = true;
+    InitializeMarkingDeque();
+  }
+}
+
+
+void MarkCompactCollector::InitializeMarkingDeque() {
+  if (marking_deque_memory_committed_) {
+    Address addr = static_cast<Address>(marking_deque_memory_->address());
+    size_t size = marking_deque_memory_->size();
+    if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize;
+    marking_deque_.Initialize(addr, addr + size);
+  }
+}
+
+
+void MarkCompactCollector::UncommitMarkingDeque() {
+  if (marking_deque_memory_committed_) {
+    bool success = marking_deque_memory_->Uncommit(
+        reinterpret_cast<Address>(marking_deque_memory_->address()),
+        marking_deque_memory_->size());
+    CHECK(success);
+    marking_deque_memory_committed_ = false;
+  }
+}
+
+
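With these helpers the deque gets a dedicated 4 MB reservation that is committed before marking and can be uncommitted between GCs, instead of borrowing a from-space page. A standalone POSIX sketch of the same reserve/commit/uncommit idea, using mmap and mprotect as rough stand-ins for base::VirtualMemory:

```cpp
#include <sys/mman.h>
#include <cassert>
#include <cstddef>

int main() {
  const size_t kSize = 4 * 1024 * 1024;  // 4 MB, as in the patch

  // Reserve address space only: no access rights, no physical pages yet.
  void* reservation =
      mmap(nullptr, kSize, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(reservation != MAP_FAILED);

  // "Commit": grant access; the OS faults pages in on first touch.
  int rc = mprotect(reservation, kSize, PROT_READ | PROT_WRITE);
  assert(rc == 0);
  static_cast<char*>(reservation)[0] = 1;  // the deque would live here

  // "Uncommit": return the pages but keep the address range reserved.
  rc = madvise(reservation, kSize, MADV_DONTNEED);
  assert(rc == 0);
  rc = mprotect(reservation, kSize, PROT_NONE);
  assert(rc == 0);

  // TearDown() analogue: release the reservation itself.
  munmap(reservation, kSize);
  return 0;
}
```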
 void MarkCompactCollector::MarkLiveObjects() {
   GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_MARK);
   double start_time = 0.0;
   if (FLAG_print_cumulative_gc_stat) {
     start_time = base::OS::TimeCurrentMillis();
   }
   // The recursive GC marker detects when it is nearing stack overflow,
   // and switches to a different marking system. JS interrupts interfere
   // with the C stack limit check.
   PostponeInterruptsScope postpone(isolate());

-  bool incremental_marking_overflowed = false;
   IncrementalMarking* incremental_marking = heap_->incremental_marking();
   if (was_marked_incrementally_) {
-    // Finalize the incremental marking and check whether we had an overflow.
-    // Both markers use grey color to mark overflowed objects so
-    // non-incremental marker can deal with them as if overflow
-    // occured during normal marking.
-    // But incremental marker uses a separate marking deque
-    // so we have to explicitly copy its overflow state.
     incremental_marking->Finalize();
-    incremental_marking_overflowed =
-        incremental_marking->marking_deque()->overflowed();
-    incremental_marking->marking_deque()->ClearOverflowed();
   } else {
     // Abort any pending incremental activities e.g. incremental sweeping.
     incremental_marking->Abort();
+    InitializeMarkingDeque();
   }

 #ifdef DEBUG
   DCHECK(state_ == PREPARE_GC);
   state_ = MARK_LIVE_OBJECTS;
 #endif
-  // The to space contains live objects, a page in from space is used as a
-  // marking stack.
-  Address marking_deque_start = heap()->new_space()->FromSpacePageLow();
-  Address marking_deque_end = heap()->new_space()->FromSpacePageHigh();
-  if (FLAG_force_marking_deque_overflows) {
-    marking_deque_end = marking_deque_start + 64 * kPointerSize;
-  }
-  marking_deque_.Initialize(marking_deque_start, marking_deque_end);
-  DCHECK(!marking_deque_.overflowed());

-  if (incremental_marking_overflowed) {
-    // There are overflowed objects left in the heap after incremental marking.
-    marking_deque_.SetOverflowed();
-  }
+  EnsureMarkingDequeIsCommittedAndInitialize();

   PrepareForCodeFlushing();

   if (was_marked_incrementally_) {
     // There is no write barrier on cells so we have to scan them now at the end
     // of the incremental marking.
     {
       HeapObjectIterator cell_iterator(heap()->cell_space());
       HeapObject* cell;
       while ((cell = cell_iterator.Next()) != NULL) {
(...skipping 33 matching lines...)
   // reachable only from weak global handles.
   //
   // First we identify nonlive weak handles and mark them as pending
   // destruction.
   heap()->isolate()->global_handles()->IdentifyWeakHandles(
       &IsUnmarkedHeapObject);
   // Then we mark the objects and process the transitive closure.
   heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
   while (marking_deque_.overflowed()) {
     RefillMarkingDeque();
-    EmptyMarkingDeque();
+    EmptyMarkingDeque<MarkCompactCollector::NON_INCREMENTAL>();
   }

   // Repeat host application specific and Harmony weak maps marking to
   // mark unmarked objects reachable from the weak roots.
   ProcessEphemeralMarking(&root_visitor);

   AfterMarking();

   if (FLAG_print_cumulative_gc_stat) {
     heap_->tracer()->AddMarkingTime(base::OS::TimeCurrentMillis() - start_time);
(...skipping 2182 matching lines...)
   SlotsBuffer* buffer = *buffer_address;
   while (buffer != NULL) {
     SlotsBuffer* next_buffer = buffer->next();
     DeallocateBuffer(buffer);
     buffer = next_buffer;
   }
   *buffer_address = NULL;
 }
 }
 }  // namespace v8::internal