| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/v8.h" | 5 #include "src/v8.h" |
| 6 | 6 |
| 7 #include "src/base/atomicops.h" | 7 #include "src/base/atomicops.h" |
| 8 #include "src/base/bits.h" | 8 #include "src/base/bits.h" |
| 9 #include "src/code-stubs.h" | 9 #include "src/code-stubs.h" |
| 10 #include "src/compilation-cache.h" | 10 #include "src/compilation-cache.h" |
| (...skipping 34 matching lines...) |
| 45 finalize_incremental_marking_(false), | 45 finalize_incremental_marking_(false), |
| 46 marking_parity_(ODD_MARKING_PARITY), | 46 marking_parity_(ODD_MARKING_PARITY), |
| 47 compacting_(false), | 47 compacting_(false), |
| 48 was_marked_incrementally_(false), | 48 was_marked_incrementally_(false), |
| 49 sweeping_in_progress_(false), | 49 sweeping_in_progress_(false), |
| 50 pending_sweeper_jobs_semaphore_(0), | 50 pending_sweeper_jobs_semaphore_(0), |
| 51 evacuation_(false), | 51 evacuation_(false), |
| 52 migration_slots_buffer_(NULL), | 52 migration_slots_buffer_(NULL), |
| 53 heap_(heap), | 53 heap_(heap), |
| 54 marking_deque_memory_(NULL), | 54 marking_deque_memory_(NULL), |
| 55 marking_deque_memory_committed_(false), | 55 marking_deque_memory_committed_(0), |
| 56 code_flusher_(NULL), | 56 code_flusher_(NULL), |
| 57 have_code_to_deoptimize_(false) { | 57 have_code_to_deoptimize_(false) { |
| 58 } | 58 } |
| 59 | 59 |
| 60 #ifdef VERIFY_HEAP | 60 #ifdef VERIFY_HEAP |
| 61 class VerifyMarkingVisitor : public ObjectVisitor { | 61 class VerifyMarkingVisitor : public ObjectVisitor { |
| 62 public: | 62 public: |
| 63 explicit VerifyMarkingVisitor(Heap* heap) : heap_(heap) {} | 63 explicit VerifyMarkingVisitor(Heap* heap) : heap_(heap) {} |
| 64 | 64 |
| 65 void VisitPointers(Object** start, Object** end) { | 65 void VisitPointers(Object** start, Object** end) { |
| (...skipping 153 matching lines...) |
| 219 VerifyEvacuation(heap->new_space()); | 219 VerifyEvacuation(heap->new_space()); |
| 220 | 220 |
| 221 VerifyEvacuationVisitor visitor; | 221 VerifyEvacuationVisitor visitor; |
| 222 heap->IterateStrongRoots(&visitor, VISIT_ALL); | 222 heap->IterateStrongRoots(&visitor, VISIT_ALL); |
| 223 } | 223 } |
| 224 #endif // VERIFY_HEAP | 224 #endif // VERIFY_HEAP |
| 225 | 225 |
| 226 | 226 |
| 227 void MarkCompactCollector::SetUp() { | 227 void MarkCompactCollector::SetUp() { |
| 228 free_list_old_space_.Reset(new FreeList(heap_->old_space())); | 228 free_list_old_space_.Reset(new FreeList(heap_->old_space())); |
| 229 EnsureMarkingDequeIsCommittedAndInitialize(256 * KB); | 229 EnsureMarkingDequeIsReserved(); |
| | 230 EnsureMarkingDequeIsCommitted(kMinMarkingDequeSize); |
| 230 } | 231 } |
| 231 | 232 |
| 232 | 233 |
| 233 void MarkCompactCollector::TearDown() { | 234 void MarkCompactCollector::TearDown() { |
| 234 AbortCompaction(); | 235 AbortCompaction(); |
| 235 delete marking_deque_memory_; | 236 delete marking_deque_memory_; |
| 236 } | 237 } |
| 237 | 238 |
| 238 | 239 |
| 239 void MarkCompactCollector::AddEvacuationCandidate(Page* p) { | 240 void MarkCompactCollector::AddEvacuationCandidate(Page* p) { |
| (...skipping 89 matching lines...) |
| 329 } | 330 } |
| 330 #endif | 331 #endif |
| 331 | 332 |
| 332 | 333 |
| 333 void MarkCompactCollector::CollectGarbage() { | 334 void MarkCompactCollector::CollectGarbage() { |
| 334 // Make sure that Prepare() has been called. The individual steps below will | 335 // Make sure that Prepare() has been called. The individual steps below will |
| 335 // update the state as they proceed. | 336 // update the state as they proceed. |
| 336 DCHECK(state_ == PREPARE_GC); | 337 DCHECK(state_ == PREPARE_GC); |
| 337 | 338 |
| 338 MarkLiveObjects(); | 339 MarkLiveObjects(); |
| | 340 |
| 339 DCHECK(heap_->incremental_marking()->IsStopped()); | 341 DCHECK(heap_->incremental_marking()->IsStopped()); |
| 340 | 342 |
| 341 // ClearNonLiveReferences can deoptimize code in dependent code arrays. | 343 // ClearNonLiveReferences can deoptimize code in dependent code arrays. |
| 342 // Process weak cells before so that weak cells in dependent code | 344 // Process weak cells before so that weak cells in dependent code |
| 343 // arrays are cleared or contain only live code objects. | 345 // arrays are cleared or contain only live code objects. |
| 344 ProcessAndClearWeakCells(); | 346 ProcessAndClearWeakCells(); |
| 345 | 347 |
| 346 if (FLAG_collect_maps) ClearNonLiveReferences(); | 348 if (FLAG_collect_maps) ClearNonLiveReferences(); |
| 347 | 349 |
| 348 ClearWeakCollections(); | 350 ClearWeakCollections(); |
| (...skipping 1793 matching lines...) |
| 2142 } | 2144 } |
| 2143 Object* undefined = heap()->undefined_value(); | 2145 Object* undefined = heap()->undefined_value(); |
| 2144 for (int i = new_length; i < length; i++) { | 2146 for (int i = new_length; i < length; i++) { |
| 2145 retained_maps->Clear(i, undefined); | 2147 retained_maps->Clear(i, undefined); |
| 2146 } | 2148 } |
| 2147 if (new_length != length) retained_maps->SetLength(new_length); | 2149 if (new_length != length) retained_maps->SetLength(new_length); |
| 2148 ProcessMarkingDeque(); | 2150 ProcessMarkingDeque(); |
| 2149 } | 2151 } |
| 2150 | 2152 |
| 2151 | 2153 |
| 2152 void MarkCompactCollector::EnsureMarkingDequeIsCommittedAndInitialize( | 2154 void MarkCompactCollector::EnsureMarkingDequeIsReserved() { |
| 2153 size_t max_size) { | 2155 DCHECK(!marking_deque_.in_use()); |
| | 2156 if (marking_deque_memory_ == NULL) { |
| | 2157 marking_deque_memory_ = new base::VirtualMemory(kMaxMarkingDequeSize); |
| | 2158 marking_deque_memory_committed_ = 0; |
| | 2159 } |
| | 2160 if (marking_deque_memory_ == NULL) { |
| | 2161 V8::FatalProcessOutOfMemory("EnsureMarkingDequeIsReserved"); |
| | 2162 } |
| | 2163 } |
| | 2164 |
| | 2165 |
| | 2166 void MarkCompactCollector::EnsureMarkingDequeIsCommitted(size_t max_size) { |
| 2154 // If the marking deque is too small, we try to allocate a bigger one. | 2167 // If the marking deque is too small, we try to allocate a bigger one. |
| 2155 // If that fails, make do with a smaller one. | 2168 // If that fails, make do with a smaller one. |
| 2156 for (size_t size = max_size; size >= 256 * KB; size >>= 1) { | 2169 CHECK(!marking_deque_.in_use()); |
| | 2170 for (size_t size = max_size; size >= kMinMarkingDequeSize; size >>= 1) { |
| 2157 base::VirtualMemory* memory = marking_deque_memory_; | 2171 base::VirtualMemory* memory = marking_deque_memory_; |
| 2158 bool is_committed = marking_deque_memory_committed_; | 2172 size_t currently_committed = marking_deque_memory_committed_; |
| 2159 | 2173 |
| 2160 if (memory == NULL || memory->size() < size) { | 2174 if (currently_committed == size) return; |
| 2161 // If we don't have memory or we only have small memory, then | 2175 |
| 2162 // try to reserve a new one. | 2176 if (currently_committed > size) { |
| 2163 memory = new base::VirtualMemory(size); | 2177 bool success = marking_deque_memory_->Uncommit( |
| 2164 is_committed = false; | 2178 reinterpret_cast<Address>(marking_deque_memory_->address()) + size, |
| | 2179 currently_committed - size); |
| | 2180 if (success) { |
| | 2181 marking_deque_memory_committed_ = size; |
| | 2182 return; |
| | 2183 } |
| | 2184 UNREACHABLE(); |
| 2165 } | 2185 } |
| 2166 if (is_committed) return; | 2186 |
| 2167 if (memory->IsReserved() && | 2187 bool success = memory->Commit( |
| 2168 memory->Commit(reinterpret_cast<Address>(memory->address()), | 2188 reinterpret_cast<Address>(memory->address()) + currently_committed, |
| 2169 memory->size(), | 2189 size - currently_committed, |
| 2170 false)) { // Not executable. | 2190 false); // Not executable. |
| 2171 if (marking_deque_memory_ != NULL && marking_deque_memory_ != memory) { | 2191 if (success) { |
| 2172 delete marking_deque_memory_; | 2192 marking_deque_memory_committed_ = size; |
| 2173 } | |
| 2174 marking_deque_memory_ = memory; | |
| 2175 marking_deque_memory_committed_ = true; | |
| 2176 InitializeMarkingDeque(); | |
| 2177 return; | 2193 return; |
| 2178 } else { | |
| 2179 // Commit failed, so we are under memory pressure. If this was the | |
| 2180 // previously reserved area we tried to commit, then remove references | |
| 2181 // to it before deleting it and unreserving it. | |
| 2182 if (marking_deque_memory_ == memory) { | |
| 2183 marking_deque_memory_ = NULL; | |
| 2184 marking_deque_memory_committed_ = false; | |
| 2185 } | |
| 2186 delete memory; // Will also unreserve the virtual allocation. | |
| 2187 } | 2194 } |
| 2188 } | 2195 } |
| 2189 V8::FatalProcessOutOfMemory("EnsureMarkingDequeIsCommitted"); | 2196 V8::FatalProcessOutOfMemory("EnsureMarkingDequeIsCommitted"); |
| 2190 } | 2197 } |
| 2191 | 2198 |
| 2192 | 2199 |
| 2193 void MarkCompactCollector::InitializeMarkingDeque() { | 2200 void MarkCompactCollector::InitializeMarkingDeque() { |
| 2194 if (marking_deque_memory_committed_) { | 2201 DCHECK(!marking_deque_.in_use()); |
| 2195 Address addr = static_cast<Address>(marking_deque_memory_->address()); | 2202 DCHECK(marking_deque_memory_committed_ > 0); |
| 2196 size_t size = marking_deque_memory_->size(); | 2203 Address addr = static_cast<Address>(marking_deque_memory_->address()); |
| 2197 if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize; | 2204 size_t size = marking_deque_memory_committed_; |
| 2198 marking_deque_.Initialize(addr, addr + size); | 2205 if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize; |
| 2199 } | 2206 marking_deque_.Initialize(addr, addr + size); |
| 2200 } | 2207 } |
| 2201 | 2208 |
| 2202 | 2209 |
| 2203 void MarkCompactCollector::UncommitMarkingDeque() { | 2210 void MarkingDeque::Initialize(Address low, Address high) { |
| 2204 if (marking_deque_memory_committed_) { | 2211 DCHECK(!in_use_); |
| 2205 bool success = marking_deque_memory_->Uncommit( | 2212 HeapObject** obj_low = reinterpret_cast<HeapObject**>(low); |
| 2206 reinterpret_cast<Address>(marking_deque_memory_->address()), | 2213 HeapObject** obj_high = reinterpret_cast<HeapObject**>(high); |
| 2207 marking_deque_memory_->size()); | 2214 array_ = obj_low; |
| 2208 CHECK(success); | 2215 mask_ = base::bits::RoundDownToPowerOfTwo32( |
| 2209 marking_deque_memory_committed_ = false; | 2216 static_cast<uint32_t>(obj_high - obj_low)) - |
| 2210 } | 2217 1; |
| | 2218 top_ = bottom_ = 0; |
| | 2219 overflowed_ = false; |
| | 2220 in_use_ = true; |
| 2211 } | 2221 } |
| 2212 | 2222 |
| 2213 | 2223 |
| | 2224 void MarkingDeque::Uninitialize(bool aborting) { |
| | 2225 if (!aborting) { |
| | 2226 DCHECK(IsEmpty()); |
| | 2227 DCHECK(!overflowed_); |
| | 2228 } |
| | 2229 DCHECK(in_use_); |
| | 2230 top_ = bottom_ = 0xdecbad; |
| | 2231 in_use_ = false; |
| | 2232 } |
| | 2233 |
| | 2234 |
| 2214 void MarkCompactCollector::MarkLiveObjects() { | 2235 void MarkCompactCollector::MarkLiveObjects() { |
| 2215 GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_MARK); | 2236 GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_MARK); |
| 2216 double start_time = 0.0; | 2237 double start_time = 0.0; |
| 2217 if (FLAG_print_cumulative_gc_stat) { | 2238 if (FLAG_print_cumulative_gc_stat) { |
| 2218 start_time = base::OS::TimeCurrentMillis(); | 2239 start_time = base::OS::TimeCurrentMillis(); |
| 2219 } | 2240 } |
| 2220 // The recursive GC marker detects when it is nearing stack overflow, | 2241 // The recursive GC marker detects when it is nearing stack overflow, |
| 2221 // and switches to a different marking system. JS interrupts interfere | 2242 // and switches to a different marking system. JS interrupts interfere |
| 2222 // with the C stack limit check. | 2243 // with the C stack limit check. |
| 2223 PostponeInterruptsScope postpone(isolate()); | 2244 PostponeInterruptsScope postpone(isolate()); |
| 2224 | 2245 |
| 2225 IncrementalMarking* incremental_marking = heap_->incremental_marking(); | 2246 IncrementalMarking* incremental_marking = heap_->incremental_marking(); |
| 2226 if (was_marked_incrementally_) { | 2247 if (was_marked_incrementally_) { |
| 2227 incremental_marking->Finalize(); | 2248 incremental_marking->Finalize(); |
| 2228 } else { | 2249 } else { |
| 2229 // Abort any pending incremental activities e.g. incremental sweeping. | 2250 // Abort any pending incremental activities e.g. incremental sweeping. |
| 2230 incremental_marking->Abort(); | 2251 incremental_marking->Abort(); |
| 2231 InitializeMarkingDeque(); | 2252 if (marking_deque_.in_use()) { |
| | 2253 marking_deque_.Uninitialize(true); |
| | 2254 } |
| 2232 } | 2255 } |
| 2233 | 2256 |
| 2234 #ifdef DEBUG | 2257 #ifdef DEBUG |
| 2235 DCHECK(state_ == PREPARE_GC); | 2258 DCHECK(state_ == PREPARE_GC); |
| 2236 state_ = MARK_LIVE_OBJECTS; | 2259 state_ = MARK_LIVE_OBJECTS; |
| 2237 #endif | 2260 #endif |
| 2238 | 2261 |
| 2239 EnsureMarkingDequeIsCommittedAndInitialize(); | 2262 EnsureMarkingDequeIsCommittedAndInitialize( |
| | 2263 MarkCompactCollector::kMaxMarkingDequeSize); |
| 2240 | 2264 |
| 2241 PrepareForCodeFlushing(); | 2265 PrepareForCodeFlushing(); |
| 2242 | 2266 |
| 2243 RootMarkingVisitor root_visitor(heap()); | 2267 RootMarkingVisitor root_visitor(heap()); |
| 2244 MarkRoots(&root_visitor); | 2268 MarkRoots(&root_visitor); |
| 2245 | 2269 |
| 2246 ProcessTopOptimizedFrame(&root_visitor); | 2270 ProcessTopOptimizedFrame(&root_visitor); |
| 2247 | 2271 |
| 2248 // Retaining dying maps should happen before or during ephemeral marking | 2272 // Retaining dying maps should happen before or during ephemeral marking |
| 2249 // because a map could keep the key of an ephemeron alive. Note that map | 2273 // because a map could keep the key of an ephemeron alive. Note that map |
| (...skipping 2413 matching lines...) |
| 4663 SlotsBuffer* buffer = *buffer_address; | 4687 SlotsBuffer* buffer = *buffer_address; |
| 4664 while (buffer != NULL) { | 4688 while (buffer != NULL) { |
| 4665 SlotsBuffer* next_buffer = buffer->next(); | 4689 SlotsBuffer* next_buffer = buffer->next(); |
| 4666 DeallocateBuffer(buffer); | 4690 DeallocateBuffer(buffer); |
| 4667 buffer = next_buffer; | 4691 buffer = next_buffer; |
| 4668 } | 4692 } |
| 4669 *buffer_address = NULL; | 4693 *buffer_address = NULL; |
| 4670 } | 4694 } |
| 4671 } // namespace internal | 4695 } // namespace internal |
| 4672 } // namespace v8 | 4696 } // namespace v8 |
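
The core design of this change is to reserve kMaxMarkingDequeSize of address space once (new EnsureMarkingDequeIsReserved, lines 2154-2163) and then grow or shrink only the committed prefix (new EnsureMarkingDequeIsCommitted, lines 2166-2196), instead of deleting and re-reserving the whole VirtualMemory on every resize. A minimal sketch of that reserve/commit/uncommit pattern using POSIX mmap/mprotect follows; this is illustrative only, the actual mechanism is the platform-specific base::VirtualMemory, and these helper names are hypothetical:

  #include <sys/mman.h>
  #include <cstddef>

  // Reserve address space without backing pages (analogous to the
  // base::VirtualMemory constructor): PROT_NONE means any access faults
  // until Commit() grants permissions.
  void* Reserve(size_t size) {
    void* base = mmap(nullptr, size, PROT_NONE,
                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
    return base == MAP_FAILED ? nullptr : base;
  }

  // Commit part of the reservation (analogous to VirtualMemory::Commit):
  // grant read/write access; physical pages arrive lazily on first touch.
  bool Commit(void* address, size_t size) {
    return mprotect(address, size, PROT_READ | PROT_WRITE) == 0;
  }

  // Uncommit (analogous to VirtualMemory::Uncommit): remap as inaccessible,
  // dropping the backing pages while keeping the address range reserved.
  bool Uncommit(void* address, size_t size) {
    return mmap(address, size, PROT_NONE,
                MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
                -1, 0) != MAP_FAILED;
  }

With the split, commit failure under memory pressure is handled by halving the requested size down to kMinMarkingDequeSize (the loop at new line 2170), and shrinking never has to release the reservation itself.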
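The new MarkingDeque::Initialize (lines 2210-2221) rounds the committed range down to a power of two so that top_ and bottom_ can wrap with a bitwise mask instead of a modulo. A simplified, self-contained sketch of that ring-buffer scheme, with hypothetical names and element type (the real class lives in the V8 sources and recovers from overflow by rescanning the heap):

  #include <cassert>
  #include <cstdint>

  // Simplified marking-deque-style ring buffer: capacity is rounded down to
  // a power of two so index arithmetic is a single "& mask_".
  class RingDeque {
   public:
    void Initialize(void** array, uint32_t capacity) {
      assert(capacity >= 2);
      uint32_t pow2 = 1;
      while (pow2 * 2 <= capacity) pow2 *= 2;  // largest power of two <= capacity
      array_ = array;
      mask_ = pow2 - 1;
      top_ = bottom_ = 0;
      overflowed_ = false;
    }

    bool IsEmpty() const { return top_ == bottom_; }
    bool IsFull() const { return ((top_ + 1) & mask_) == bottom_; }

    // When full, the object is dropped and overflowed_ is set; the caller
    // must later rescan for marked-but-unprocessed objects.
    void Push(void* object) {
      if (IsFull()) {
        overflowed_ = true;
        return;
      }
      array_[top_] = object;
      top_ = (top_ + 1) & mask_;
    }

    void* Pop() {
      assert(!IsEmpty());
      top_ = (top_ - 1) & mask_;  // unsigned wraparound lands on mask_
      return array_[top_];
    }

   private:
    void** array_ = nullptr;
    uint32_t mask_ = 0;
    uint32_t top_ = 0;
    uint32_t bottom_ = 0;
    bool overflowed_ = false;
  };

This is also why FLAG_force_marking_deque_overflows caps the deque at 64 * kPointerSize (new line 2205): a tiny power-of-two capacity forces the overflow path to be exercised in tests.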