OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/v8.h" | 5 #include "src/v8.h" |
6 | 6 |
7 #include "src/base/atomicops.h" | 7 #include "src/base/atomicops.h" |
8 #include "src/base/bits.h" | 8 #include "src/base/bits.h" |
9 #include "src/code-stubs.h" | 9 #include "src/code-stubs.h" |
10 #include "src/compilation-cache.h" | 10 #include "src/compilation-cache.h" |
(...skipping 32 matching lines...)
43 reduce_memory_footprint_(false), | 43 reduce_memory_footprint_(false), |
44 abort_incremental_marking_(false), | 44 abort_incremental_marking_(false), |
45 marking_parity_(ODD_MARKING_PARITY), | 45 marking_parity_(ODD_MARKING_PARITY), |
46 compacting_(false), | 46 compacting_(false), |
47 was_marked_incrementally_(false), | 47 was_marked_incrementally_(false), |
48 sweeping_in_progress_(false), | 48 sweeping_in_progress_(false), |
49 pending_sweeper_jobs_semaphore_(0), | 49 pending_sweeper_jobs_semaphore_(0), |
50 evacuation_(false), | 50 evacuation_(false), |
51 migration_slots_buffer_(NULL), | 51 migration_slots_buffer_(NULL), |
52 heap_(heap), | 52 heap_(heap), |
| 53 marking_deque_memory_(NULL), |
| 54 marking_deque_memory_committed_(false), |
53 code_flusher_(NULL), | 55 code_flusher_(NULL), |
54 have_code_to_deoptimize_(false) { | 56 have_code_to_deoptimize_(false) { |
55 } | 57 } |
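Note: the two new initializers correspond to fields this CL adds to MarkCompactCollector for the deque's backing store. A sketch of the matching declarations (their exact form and placement in mark-compact.h are not shown in this diff, so treat this as an assumption):

    base::VirtualMemory* marking_deque_memory_;   // lazily allocated backing store
    bool marking_deque_memory_committed_;         // true while its pages are committed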
56 | 58 |
57 #ifdef VERIFY_HEAP | 59 #ifdef VERIFY_HEAP |
58 class VerifyMarkingVisitor : public ObjectVisitor { | 60 class VerifyMarkingVisitor : public ObjectVisitor { |
59 public: | 61 public: |
60 explicit VerifyMarkingVisitor(Heap* heap) : heap_(heap) {} | 62 explicit VerifyMarkingVisitor(Heap* heap) : heap_(heap) {} |
61 | 63 |
62 void VisitPointers(Object** start, Object** end) { | 64 void VisitPointers(Object** start, Object** end) { |
(...skipping 163 matching lines...)
226 } | 228 } |
227 #endif // VERIFY_HEAP | 229 #endif // VERIFY_HEAP |
228 | 230 |
229 | 231 |
230 void MarkCompactCollector::SetUp() { | 232 void MarkCompactCollector::SetUp() { |
231 free_list_old_data_space_.Reset(new FreeList(heap_->old_data_space())); | 233 free_list_old_data_space_.Reset(new FreeList(heap_->old_data_space())); |
232 free_list_old_pointer_space_.Reset(new FreeList(heap_->old_pointer_space())); | 234 free_list_old_pointer_space_.Reset(new FreeList(heap_->old_pointer_space())); |
233 } | 235 } |
234 | 236 |
235 | 237 |
236 void MarkCompactCollector::TearDown() { AbortCompaction(); } | 238 void MarkCompactCollector::TearDown() { |
| 239 AbortCompaction(); |
| 240 delete marking_deque_memory_; |
| 241 } |
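Note: delete on a NULL pointer is a no-op in C++, so the new TearDown() is safe even when EnsureMarkingDequeIsCommittedAndInitialize() was never called and marking_deque_memory_ is still NULL.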
237 | 242 |
238 | 243 |
239 void MarkCompactCollector::AddEvacuationCandidate(Page* p) { | 244 void MarkCompactCollector::AddEvacuationCandidate(Page* p) { |
240 p->MarkEvacuationCandidate(); | 245 p->MarkEvacuationCandidate(); |
241 evacuation_candidates_.Add(p); | 246 evacuation_candidates_.Add(p); |
242 } | 247 } |
243 | 248 |
244 | 249 |
245 static void TraceFragmentation(PagedSpace* space) { | 250 static void TraceFragmentation(PagedSpace* space) { |
246 int number_of_pages = space->CountTotalPages(); | 251 int number_of_pages = space->CountTotalPages(); |
(...skipping 1755 matching lines...)
2002 SetMark(weak_object_to_code_table, mark); | 2007 SetMark(weak_object_to_code_table, mark); |
2003 } | 2008 } |
2004 } | 2009 } |
2005 | 2010 |
2006 | 2011 |
2007 // Mark all objects reachable from the objects on the marking stack. | 2012 // Mark all objects reachable from the objects on the marking stack. |
2008 // Before: the marking stack contains zero or more heap object pointers. | 2013 // Before: the marking stack contains zero or more heap object pointers. |
2009 // After: the marking stack is empty, and all objects reachable from the | 2014 // After: the marking stack is empty, and all objects reachable from the |
2010 // marking stack have been marked, or are overflowed in the heap. | 2015 // marking stack have been marked, or are overflowed in the heap. |
2011 void MarkCompactCollector::EmptyMarkingDeque() { | 2016 void MarkCompactCollector::EmptyMarkingDeque() { |
| 2017 Map* filler_map = heap_->one_pointer_filler_map(); |
2012 while (!marking_deque_.IsEmpty()) { | 2018 while (!marking_deque_.IsEmpty()) { |
2013 HeapObject* object = marking_deque_.Pop(); | 2019 HeapObject* object = marking_deque_.Pop(); |
| 2020 // Explicitly skip one-word fillers. Incremental markbit patterns are |
| 2021 // correct only for objects that occupy at least two words. |
| 2022 Map* map = object->map(); |
| 2023 if (map == filler_map) continue; |
| 2024 |
2014 DCHECK(object->IsHeapObject()); | 2025 DCHECK(object->IsHeapObject()); |
2015 DCHECK(heap()->Contains(object)); | 2026 DCHECK(heap()->Contains(object)); |
2016 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object))); | 2027 DCHECK(!Marking::IsWhite(Marking::MarkBitFrom(object))); |
2017 | 2028 |
2018 Map* map = object->map(); | |
2019 MarkBit map_mark = Marking::MarkBitFrom(map); | 2029 MarkBit map_mark = Marking::MarkBitFrom(map); |
2020 MarkObject(map, map_mark); | 2030 MarkObject(map, map_mark); |
2021 | 2031 |
2022 MarkCompactMarkingVisitor::IterateBody(map, object); | 2032 MarkCompactMarkingVisitor::IterateBody(map, object); |
2023 } | 2033 } |
2024 } | 2034 } |
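Why the new filler check matters here: entries already on the deque can later be turned into fillers (left-trimming an array is one such source), so a Pop() may yield a filler object. For a one-word filler the markbit pattern cannot even express grey, assuming the two-consecutive-bit encoding implied by the Marking::IsWhite/IsGrey/IsBlack predicates used in this file:

    // white = 00, black = 10, grey = 11  (one markbit per word)
    //
    // A one-word filler owns a single markbit; the second bit of its color
    // pattern is really the first markbit of the following object, so any
    // color that needs two bits is meaningless for it. Visiting such an
    // entry via IterateBody would be wrong, hence the skip before the
    // DCHECKs.

The left-trimming example is an illustration; the diff itself only shows the skip, which mirrors the equivalent check in the incremental marker's deque-draining loop.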
2025 | 2035 |
2026 | 2036 |
2027 // Sweep the heap for overflowed objects, clear their overflow bits, and | 2037 // Sweep the heap for overflowed objects, clear their overflow bits, and |
2028 // push them on the marking stack. Stop early if the marking stack fills | 2038 // push them on the marking stack. Stop early if the marking stack fills |
(...skipping 74 matching lines...)
2103 if (!code->CanDeoptAt(it.frame()->pc())) { | 2113 if (!code->CanDeoptAt(it.frame()->pc())) { |
2104 code->CodeIterateBody(visitor); | 2114 code->CodeIterateBody(visitor); |
2105 } | 2115 } |
2106 ProcessMarkingDeque(); | 2116 ProcessMarkingDeque(); |
2107 return; | 2117 return; |
2108 } | 2118 } |
2109 } | 2119 } |
2110 } | 2120 } |
2111 | 2121 |
2112 | 2122 |
| 2123 void MarkCompactCollector::EnsureMarkingDequeIsCommittedAndInitialize() { |
| 2124 if (marking_deque_memory_ == NULL) { |
| 2125 marking_deque_memory_ = new base::VirtualMemory(4 * MB); |
| 2126 } |
| 2127 if (!marking_deque_memory_committed_) { |
| 2128 bool success = marking_deque_memory_->Commit( |
| 2129 reinterpret_cast<Address>(marking_deque_memory_->address()), |
| 2130 marking_deque_memory_->size(), |
| 2131 false); // Not executable. |
| 2132 CHECK(success); |
| 2133 marking_deque_memory_committed_ = true; |
| 2134 InitializeMarkingDeque(); |
| 2135 } |
| 2136 } |
| 2137 |
| 2138 |
| 2139 void MarkCompactCollector::InitializeMarkingDeque() { |
| 2140 if (marking_deque_memory_committed_) { |
| 2141 Address addr = static_cast<Address>(marking_deque_memory_->address()); |
| 2142 size_t size = marking_deque_memory_->size(); |
| 2143 if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize; |
| 2144 marking_deque_.Initialize(addr, addr + size); |
| 2145 } |
| 2146 } |
| 2147 |
| 2148 |
| 2149 void MarkCompactCollector::UncommitMarkingDeque() { |
| 2150 if (marking_deque_memory_committed_) { |
| 2151 bool success = marking_deque_memory_->Uncommit( |
| 2152 reinterpret_cast<Address>(marking_deque_memory_->address()), |
| 2153 marking_deque_memory_->size()); |
| 2154 CHECK(success); |
| 2155 marking_deque_memory_committed_ = false; |
| 2156 } |
| 2157 } |
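Taken together, these three helpers move the deque's backing store off the from-space page it previously borrowed and give it an explicit commit/uncommit lifecycle. A sketch of the intended usage (collector is a hypothetical MarkCompactCollector*; call sites other than MarkLiveObjects below are not visible in this diff, so the uncommit trigger is an assumption):

    collector->EnsureMarkingDequeIsCommittedAndInitialize();  // before marking starts
    // ... marking pushes to and drains marking_deque_ ...
    collector->UncommitMarkingDeque();  // later, e.g. on an idle notification,
                                        // returning the reserved 4 MB to the OS

A Commit() failure is turned into a hard abort by the CHECK rather than being surfaced to the caller as a recoverable error.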
| 2158 |
| 2159 |
2113 void MarkCompactCollector::MarkLiveObjects() { | 2160 void MarkCompactCollector::MarkLiveObjects() { |
2114 GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_MARK); | 2161 GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_MARK); |
2115 double start_time = 0.0; | 2162 double start_time = 0.0; |
2116 if (FLAG_print_cumulative_gc_stat) { | 2163 if (FLAG_print_cumulative_gc_stat) { |
2117 start_time = base::OS::TimeCurrentMillis(); | 2164 start_time = base::OS::TimeCurrentMillis(); |
2118 } | 2165 } |
2119 // The recursive GC marker detects when it is nearing stack overflow, | 2166 // The recursive GC marker detects when it is nearing stack overflow, |
2120 // and switches to a different marking system. JS interrupts interfere | 2167 // and switches to a different marking system. JS interrupts interfere |
2121 // with the C stack limit check. | 2168 // with the C stack limit check. |
2122 PostponeInterruptsScope postpone(isolate()); | 2169 PostponeInterruptsScope postpone(isolate()); |
2123 | 2170 |
2124 bool incremental_marking_overflowed = false; | |
2125 IncrementalMarking* incremental_marking = heap_->incremental_marking(); | 2171 IncrementalMarking* incremental_marking = heap_->incremental_marking(); |
2126 if (was_marked_incrementally_) { | 2172 if (was_marked_incrementally_) { |
2127 // Finalize the incremental marking and check whether we had an overflow. | |
2128 // Both markers use grey color to mark overflowed objects so the | |
2129 // non-incremental marker can deal with them as if overflow | |
2130 // occurred during normal marking. | |
2131 // But incremental marker uses a separate marking deque | |
2132 // so we have to explicitly copy its overflow state. | |
2133 incremental_marking->Finalize(); | 2173 incremental_marking->Finalize(); |
2134 incremental_marking_overflowed = | |
2135 incremental_marking->marking_deque()->overflowed(); | |
2136 incremental_marking->marking_deque()->ClearOverflowed(); | |
2137 } else { | 2174 } else { |
2138 // Abort any pending incremental activities, e.g. incremental sweeping. | 2176 // Abort any pending incremental activities, e.g. incremental sweeping. |
2139 incremental_marking->Abort(); | 2176 incremental_marking->Abort(); |
| 2177 InitializeMarkingDeque(); |
2140 } | 2178 } |
2141 | 2179 |
2142 #ifdef DEBUG | 2180 #ifdef DEBUG |
2143 DCHECK(state_ == PREPARE_GC); | 2181 DCHECK(state_ == PREPARE_GC); |
2144 state_ = MARK_LIVE_OBJECTS; | 2182 state_ = MARK_LIVE_OBJECTS; |
2145 #endif | 2183 #endif |
2146 // The to space contains live objects, a page in from space is used as a | |
2147 // marking stack. | |
2148 Address marking_deque_start = heap()->new_space()->FromSpacePageLow(); | |
2149 Address marking_deque_end = heap()->new_space()->FromSpacePageHigh(); | |
2150 if (FLAG_force_marking_deque_overflows) { | |
2151 marking_deque_end = marking_deque_start + 64 * kPointerSize; | |
2152 } | |
2153 marking_deque_.Initialize(marking_deque_start, marking_deque_end); | |
2154 DCHECK(!marking_deque_.overflowed()); | |
2155 | 2184 |
2156 if (incremental_marking_overflowed) { | 2185 EnsureMarkingDequeIsCommittedAndInitialize(); |
2157 // There are overflowed objects left in the heap after incremental marking. | |
2158 marking_deque_.SetOverflowed(); | |
2159 } | |
2160 | 2186 |
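Note on the deleted overflow hand-off: the old code had to copy the incremental marker's overflowed() state into a freshly initialized deque because each marker owned a separate one (the full collector's lived in a from-space page). With both markers now sharing the deque committed above, which the diff implies by dropping that copy and relaxing the IsBlack DCHECK to !IsWhite, grey objects and the overflow flag simply carry over after Finalize(); EnsureMarkingDequeIsCommittedAndInitialize() is then a no-op because the memory is already committed.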
2161 PrepareForCodeFlushing(); | 2187 PrepareForCodeFlushing(); |
2162 | 2188 |
2163 if (was_marked_incrementally_) { | 2189 if (was_marked_incrementally_) { |
2164 // There is no write barrier on cells, so we have to scan them now at the end | 2190 // There is no write barrier on cells, so we have to scan them now at the end |
2165 // of the incremental marking. | 2191 // of the incremental marking. |
2166 { | 2192 { |
2167 HeapObjectIterator cell_iterator(heap()->cell_space()); | 2193 HeapObjectIterator cell_iterator(heap()->cell_space()); |
2168 HeapObject* cell; | 2194 HeapObject* cell; |
2169 while ((cell = cell_iterator.Next()) != NULL) { | 2195 while ((cell = cell_iterator.Next()) != NULL) { |
(...skipping 2236 matching lines...)
4406 SlotsBuffer* buffer = *buffer_address; | 4432 SlotsBuffer* buffer = *buffer_address; |
4407 while (buffer != NULL) { | 4433 while (buffer != NULL) { |
4408 SlotsBuffer* next_buffer = buffer->next(); | 4434 SlotsBuffer* next_buffer = buffer->next(); |
4409 DeallocateBuffer(buffer); | 4435 DeallocateBuffer(buffer); |
4410 buffer = next_buffer; | 4436 buffer = next_buffer; |
4411 } | 4437 } |
4412 *buffer_address = NULL; | 4438 *buffer_address = NULL; |
4413 } | 4439 } |
4414 } | 4440 } |
4415 } // namespace v8::internal | 4441 } // namespace v8::internal |