| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/heap/heap.h" | 5 #include "src/heap/heap.h" |
| 6 | 6 |
| 7 #include "src/accessors.h" | 7 #include "src/accessors.h" |
| 8 #include "src/api.h" | 8 #include "src/api.h" |
| 9 #include "src/ast/context-slot-cache.h" | 9 #include "src/ast/context-slot-cache.h" |
| 10 #include "src/base/bits.h" | 10 #include "src/base/bits.h" |
| (...skipping 246 matching lines...) |
| 257 isolate_->counters()->gc_compactor_caused_by_request()->Increment(); | 257 isolate_->counters()->gc_compactor_caused_by_request()->Increment(); |
| 258 *reason = "GC in old space requested"; | 258 *reason = "GC in old space requested"; |
| 259 return MARK_COMPACTOR; | 259 return MARK_COMPACTOR; |
| 260 } | 260 } |
| 261 | 261 |
| 262 if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) { | 262 if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) { |
| 263 *reason = "GC in old space forced by flags"; | 263 *reason = "GC in old space forced by flags"; |
| 264 return MARK_COMPACTOR; | 264 return MARK_COMPACTOR; |
| 265 } | 265 } |
| 266 | 266 |
| 267 if (incremental_marking()->NeedsFinalization()) { |
| 268 return MARK_COMPACTOR; |
| 269 } |
| 270 |
| 267 // Is there enough space left in OLD to guarantee that a scavenge can | 271 // Is there enough space left in OLD to guarantee that a scavenge can |
| 268 // succeed? | 272 // succeed? |
| 269 // | 273 // |
| 270 // Note that MemoryAllocator->MaxAvailable() undercounts the memory available | 274 // Note that MemoryAllocator->MaxAvailable() undercounts the memory available |
| 271 // for object promotion. It counts only the bytes that the memory | 275 // for object promotion. It counts only the bytes that the memory |
| 272 // allocator has not yet allocated from the OS and assigned to any space, | 276 // allocator has not yet allocated from the OS and assigned to any space, |
| 273 // and does not count available bytes already in the old space or code | 277 // and does not count available bytes already in the old space or code |
| 274 // space. Undercounting is safe---we may get an unrequested full GC when | 278 // space. Undercounting is safe---we may get an unrequested full GC when |
| 275 // a scavenge would have succeeded. | 279 // a scavenge would have succeeded. |
| 276 if (memory_allocator()->MaxAvailable() <= new_space_->Size()) { | 280 if (memory_allocator()->MaxAvailable() <= new_space_->Size()) { |
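The hunk above changes which collector SelectGarbageCollector picks: marking that only needs finalization is now completed by a full GC, and the pre-existing check still falls back to MARK_COMPACTOR when the allocator cannot guarantee head-room for a worst-case scavenge (every new-space byte surviving and being promoted). The standalone sketch below models that decision order; HeapModel and its fields are illustrative stand-ins, not V8 API, and the real function weighs several more conditions than shown here.

#include <cstddef>
#include <iostream>

enum class GarbageCollector { SCAVENGER, MARK_COMPACTOR };

struct HeapModel {
  bool marking_needs_finalization;   // stand-in for incremental_marking()->NeedsFinalization()
  std::size_t max_available_bytes;   // stand-in for memory_allocator()->MaxAvailable()
  std::size_t new_space_size_bytes;  // stand-in for new_space_->Size()
};

GarbageCollector SelectCollector(const HeapModel& heap) {
  // Marking that is ready to be finalized is finished by a full GC (the new
  // check added in this hunk).
  if (heap.marking_needs_finalization) return GarbageCollector::MARK_COMPACTOR;

  // Conservative scavenge guarantee: in the worst case a scavenge promotes the
  // whole new space, so the not-yet-assigned OS memory must cover its size.
  // MaxAvailable() undercounts (free space already inside old/code space is
  // ignored), which can only turn a would-be scavenge into a full GC, never
  // the other way around.
  if (heap.max_available_bytes <= heap.new_space_size_bytes) {
    return GarbageCollector::MARK_COMPACTOR;
  }
  return GarbageCollector::SCAVENGER;
}

int main() {
  // 8 MB still available from the OS, 16 MB of new space: too tight for a
  // guaranteed scavenge, so the full collector is chosen.
  HeapModel heap{false, std::size_t{8} << 20, std::size_t{16} << 20};
  std::cout << (SelectCollector(heap) == GarbageCollector::MARK_COMPACTOR
                    ? "MARK_COMPACTOR\n"
                    : "SCAVENGER\n");
}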
| (...skipping 5013 matching lines...) |
| 5290 } | 5294 } |
| 5291 old_generation_allocation_limit_ = limit; | 5295 old_generation_allocation_limit_ = limit; |
| 5292 } | 5296 } |
| 5293 } | 5297 } |
| 5294 | 5298 |
| 5295 // This predicate is called when an old generation space cannot allocate from | 5299 // This predicate is called when an old generation space cannot allocate from |
| 5296 // the free list and is about to add a new page. Returning false will cause a | 5300 // the free list and is about to add a new page. Returning false will cause a |
| 5297 // major GC. It happens when the old generation allocation limit is reached and | 5301 // major GC. It happens when the old generation allocation limit is reached and |
| 5298 // - either we need to optimize for memory usage, | 5302 // - either we need to optimize for memory usage, |
| 5299 // - or the incremental marking is not in progress and we cannot start it. | 5303 // - or the incremental marking is not in progress and we cannot start it. |
| 5300 bool Heap::ShouldExpandOldGenerationOnAllocationFailure() { | 5304 bool Heap::ShouldExpandOldGenerationOnSlowAllocation() { |
| 5301 if (always_allocate() || OldGenerationSpaceAvailable() > 0) return true; | 5305 if (always_allocate() || OldGenerationSpaceAvailable() > 0) return true; |
| 5302 // We reached the old generation allocation limit. | 5306 // We reached the old generation allocation limit. |
| 5303 | 5307 |
| 5304 if (ShouldOptimizeForMemoryUsage()) return false; | 5308 if (ShouldOptimizeForMemoryUsage()) return false; |
| 5305 | 5309 |
| 5310 if (incremental_marking()->NeedsFinalization()) { |
| 5311 return false; |
| 5312 } |
| 5313 |
| 5306 if (incremental_marking()->IsStopped() && | 5314 if (incremental_marking()->IsStopped() && |
| 5307 IncrementalMarkingLimitReached() == IncrementalMarkingLimit::kNoLimit) { | 5315 IncrementalMarkingLimitReached() == IncrementalMarkingLimit::kNoLimit) { |
| 5308 // We cannot start incremental marking. | 5316 // We cannot start incremental marking. |
| 5309 return false; | 5317 return false; |
| 5310 } | 5318 } |
| 5311 return true; | 5319 return true; |
| 5312 } | 5320 } |
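For reference, the control flow of the renamed predicate after this patch can be summarized by the following standalone sketch. OldGenState and its fields are hypothetical stand-ins for the corresponding Heap accessors (always_allocate(), OldGenerationSpaceAvailable(), ShouldOptimizeForMemoryUsage(), incremental_marking()->NeedsFinalization(), IsStopped(), IncrementalMarkingLimitReached()); it is a sketch of the logic shown above, not the actual implementation.

#include <cstdint>
#include <iostream>

enum class IncrementalMarkingLimit { kNoLimit, kSoftLimit, kHardLimit };

struct OldGenState {
  bool always_allocate;                   // stand-in for always_allocate()
  std::int64_t space_available;           // stand-in for OldGenerationSpaceAvailable()
  bool optimize_for_memory;               // stand-in for ShouldOptimizeForMemoryUsage()
  bool marking_needs_finalization;        // stand-in for NeedsFinalization()
  bool marking_stopped;                   // stand-in for incremental_marking()->IsStopped()
  IncrementalMarkingLimit limit_reached;  // stand-in for IncrementalMarkingLimitReached()
};

// Returns true if the old generation may grow by another page; returning false
// forces a major GC instead, mirroring the decision order in the hunk above.
bool ShouldExpandOldGeneration(const OldGenState& s) {
  if (s.always_allocate || s.space_available > 0) return true;
  // The old generation allocation limit has been reached.
  if (s.optimize_for_memory) return false;
  // New in this patch: if marking only needs finalization, take the full GC
  // now instead of expanding.
  if (s.marking_needs_finalization) return false;
  // If marking is stopped and cannot be started, a full GC is the only way to
  // reclaim memory, so do not expand.
  if (s.marking_stopped &&
      s.limit_reached == IncrementalMarkingLimit::kNoLimit) {
    return false;
  }
  return true;
}

int main() {
  OldGenState s{false, 0, false, true, false, IncrementalMarkingLimit::kHardLimit};
  std::cout << std::boolalpha << ShouldExpandOldGeneration(s) << "\n";  // prints false
}

With the new early return, reaching the allocation limit while marking only awaits finalization triggers the full GC immediately instead of growing the old generation.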
| 5313 | 5321 |
| 5314 // This function returns either kNoLimit, kSoftLimit, or kHardLimit. | 5322 // This function returns either kNoLimit, kSoftLimit, or kHardLimit. |
| 5315 // The kNoLimit means that either incremental marking is disabled or it is too | 5323 // The kNoLimit means that either incremental marking is disabled or it is too |
| (...skipping 1160 matching lines...) |
| 6476 } | 6484 } |
| 6477 | 6485 |
| 6478 | 6486 |
| 6479 // static | 6487 // static |
| 6480 int Heap::GetStaticVisitorIdForMap(Map* map) { | 6488 int Heap::GetStaticVisitorIdForMap(Map* map) { |
| 6481 return StaticVisitorBase::GetVisitorId(map); | 6489 return StaticVisitorBase::GetVisitorId(map); |
| 6482 } | 6490 } |
| 6483 | 6491 |
| 6484 } // namespace internal | 6492 } // namespace internal |
| 6485 } // namespace v8 | 6493 } // namespace v8 |