Chromium Code Reviews

Diff: src/heap/heap.cc

Issue 2364923002: [heap] New heuristics for starting of incremental marking. (Closed)
Patch Set: rebase (created 4 years, 2 months ago)
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "src/heap/heap.h"

 #include "src/accessors.h"
 #include "src/api.h"
 #include "src/ast/context-slot-cache.h"
 #include "src/base/bits.h"
(...skipping 249 matching lines...)
     isolate_->counters()->gc_compactor_caused_by_request()->Increment();
     *reason = "GC in old space requested";
     return MARK_COMPACTOR;
   }

   if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) {
     *reason = "GC in old space forced by flags";
     return MARK_COMPACTOR;
   }

-  // Is enough data promoted to justify a global GC?
-  if (OldGenerationAllocationLimitReached()) {
-    isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
-    *reason = "promotion limit reached";
-    return MARK_COMPACTOR;
-  }
-
   // Is there enough space left in OLD to guarantee that a scavenge can
   // succeed?
   //
   // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
   // for object promotion. It counts only the bytes that the memory
   // allocator has not yet allocated from the OS and assigned to any space,
   // and does not count available bytes already in the old space or code
   // space. Undercounting is safe---we may get an unrequested full GC when
   // a scavenge would have succeeded.
   if (memory_allocator()->MaxAvailable() <= new_space_->Size()) {
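
To see why undercounting is harmless here, consider a minimal standalone sketch of the same decision (not V8 code; the byte counts and the helper name Choose are invented): because the available-memory figure is a lower bound, the comparison can only err towards picking the full mark-compact collector, never towards a scavenge that could run out of promotion space.

#include <cstdint>
#include <cstdio>

enum GarbageCollector { SCAVENGER, MARK_COMPACTOR };

// Conservative chooser: `max_available` is a lower bound on the memory the
// allocator could still hand out for promoted objects.
GarbageCollector Choose(int64_t max_available, int64_t new_space_size) {
  // If even the lower bound cannot cover promotion of the whole new space,
  // fall back to a full GC.
  if (max_available <= new_space_size) return MARK_COMPACTOR;
  return SCAVENGER;
}

int main() {
  // 4 MB possibly still available vs. an 8 MB new space: the undercount can
  // only push the answer from SCAVENGER to MARK_COMPACTOR, which is safe.
  std::printf("%s\n", Choose(4 << 20, 8 << 20) == MARK_COMPACTOR
                          ? "MARK_COMPACTOR"
                          : "SCAVENGER");
  return 0;
}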
(...skipping 770 matching lines...)
                                        GarbageCollectionReason gc_reason,
                                        GCCallbackFlags gc_callback_flags) {
   DCHECK(incremental_marking()->IsStopped());
   set_current_gc_flags(gc_flags);
   current_gc_callback_flags_ = gc_callback_flags;
   incremental_marking()->Start(gc_reason);
 }

 void Heap::StartIncrementalMarkingIfAllocationLimitIsReached(
     int gc_flags, const GCCallbackFlags gc_callback_flags) {
-  if (incremental_marking()->IsStopped() &&
-      incremental_marking()->ShouldActivateEvenWithoutIdleNotification()) {
-    StartIncrementalMarking(gc_flags, GarbageCollectionReason::kAllocationLimit,
-                            gc_callback_flags);
+  if (incremental_marking()->IsStopped()) {
+    IncrementalMarkingLimit reached_limit = ReachedIncrementalMarkingLimit();
+    if (reached_limit == IncrementalMarkingLimit::kSoftLimit) {
+      incremental_marking()->incremental_marking_job()->ScheduleTask(this);
+    } else if (reached_limit == IncrementalMarkingLimit::kHardLimit) {
+      StartIncrementalMarking(gc_flags,
+                              GarbageCollectionReason::kAllocationLimit,
+                              gc_callback_flags);
+    }
   }
 }
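
This hunk is the behavioral core of the patch: rather than a single yes/no trigger, allocation now distinguishes a soft limit, where incremental marking is only scheduled as a task, from a hard limit, where it is started on the spot. The following standalone sketch mirrors that dispatch; FakeHeap and its methods are invented stand-ins, and only the enum values match the patch.

#include <cstdio>

// Mirrors the enum introduced by the patch; everything else below is a
// simplified stand-in, not actual V8 API.
enum class IncrementalMarkingLimit { kNoLimit, kSoftLimit, kHardLimit };

struct FakeHeap {
  IncrementalMarkingLimit reached_limit;  // would come from the heuristic

  void ScheduleIncrementalMarkingTask() { std::puts("task scheduled"); }
  void StartIncrementalMarking() { std::puts("marking started"); }

  // Same shape as Heap::StartIncrementalMarkingIfAllocationLimitIsReached():
  // do nothing below the limit, schedule lazily at the soft limit, start
  // eagerly at the hard limit.
  void OnAllocation() {
    if (reached_limit == IncrementalMarkingLimit::kSoftLimit) {
      ScheduleIncrementalMarkingTask();
    } else if (reached_limit == IncrementalMarkingLimit::kHardLimit) {
      StartIncrementalMarking();
    }
  }
};

int main() {
  FakeHeap heap{IncrementalMarkingLimit::kSoftLimit};
  heap.OnAllocation();  // prints "task scheduled"
  return 0;
}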

 void Heap::StartIdleIncrementalMarking(GarbageCollectionReason gc_reason) {
   gc_idle_time_handler_->ResetNoProgressCounter();
   StartIncrementalMarking(kReduceMemoryFootprintMask, gc_reason,
                           kNoGCCallbackFlags);
 }


(...skipping 4230 matching lines...)
       CalculateOldGenerationAllocationLimit(factor, old_gen_size);

   if (FLAG_trace_gc_verbose) {
     isolate_->PrintWithTimestamp("Grow: old size: %" V8PRIdPTR
                                  " KB, new limit: %" V8PRIdPTR " KB (%.1f)\n",
                                  old_gen_size / KB,
                                  old_generation_allocation_limit_ / KB, factor);
   }
 }

-
 void Heap::DampenOldGenerationAllocationLimit(intptr_t old_gen_size,
                                               double gc_speed,
                                               double mutator_speed) {
   double factor = HeapGrowingFactor(gc_speed, mutator_speed);
   intptr_t limit = CalculateOldGenerationAllocationLimit(factor, old_gen_size);
   if (limit < old_generation_allocation_limit_) {
     if (FLAG_trace_gc_verbose) {
       isolate_->PrintWithTimestamp(
           "Dampen: old size: %" V8PRIdPTR " KB, old limit: %" V8PRIdPTR
           " KB, "
           "new limit: %" V8PRIdPTR " KB (%.1f)\n",
           old_gen_size / KB, old_generation_allocation_limit_ / KB, limit / KB,
           factor);
     }
     old_generation_allocation_limit_ = limit;
   }
 }

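For context, both the Grow and Dampen paths derive a candidate limit from the current old-generation size and a growing factor, but the dampen step only ever lowers the stored limit, never raises it. A small numeric sketch, assuming for illustration only that the limit is simply old size times the factor (the real CalculateOldGenerationAllocationLimit is more involved):

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Illustration only: assume the limit is old size times the growing factor.
int64_t CalculateLimit(double factor, int64_t old_gen_size) {
  return static_cast<int64_t>(factor * old_gen_size);
}

int main() {
  int64_t current_limit = 400 << 20;       // 400 MB limit set earlier
  const int64_t old_gen_size = 150 << 20;  // 150 MB live after this GC
  const double factor = 1.5;               // hypothetical growing factor

  // Dampening: adopt the freshly computed limit only if it is lower than the
  // existing one, i.e. the limit can shrink here but never grow.
  int64_t new_limit = CalculateLimit(factor, old_gen_size);  // 225 MB
  current_limit = std::min(current_limit, new_limit);

  std::printf("dampened limit: %lld MB\n",
              static_cast<long long>(current_limit >> 20));
  return 0;
}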
+// This predicate is called when an old generation space cannot allocate from
+// the free list and is about to add a new page. Returning false will cause a
+// major GC. It happens when the old generation allocation limit is reached and
+// - either we need to optimize for memory usage,
+// - or incremental marking is not in progress and we cannot start it.
+bool Heap::ShouldExpandOldGenerationOnAllocationFailure() {
+  if (always_allocate() || OldGenerationSpaceAvailable() > 0) return true;
+  // We reached the old generation allocation limit.
+
+  if (ShouldOptimizeForMemoryUsage()) return false;
+
+  if (incremental_marking()->IsStopped() &&
+      ReachedIncrementalMarkingLimit() == IncrementalMarkingLimit::kNoLimit) {
+    // We cannot start incremental marking.
+    return false;
+  }
+  return true;
+}
+
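Read as a decision rule: when a free-list allocation fails, the old generation is expanded with a new page unless the allocation limit has been reached and either memory usage is being prioritized or incremental marking is neither running nor startable. A standalone model of that rule (the boolean parameters are invented names, not V8 state):

#include <cstdio>

// Stand-alone model of ShouldExpandOldGenerationOnAllocationFailure().
// All inputs are plain booleans here; in V8 they come from heap state.
bool ShouldExpand(bool always_allocate, bool below_limit,
                  bool optimize_for_memory, bool marking_running,
                  bool can_start_marking) {
  if (always_allocate || below_limit) return true;  // still room, just expand
  if (optimize_for_memory) return false;            // prefer a major GC
  if (!marking_running && !can_start_marking) return false;  // GC only option
  return true;  // marking is (or can be) running: expand and let it finish
}

int main() {
  // Over the limit, but incremental marking can still be started: expand.
  std::printf("%d\n", ShouldExpand(false, false, false, false, true));   // 1
  // Over the limit and marking cannot be started: trigger a major GC.
  std::printf("%d\n", ShouldExpand(false, false, false, false, false));  // 0
  return 0;
}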
+// This function returns either kNoLimit, kSoftLimit, or kHardLimit.
+// kNoLimit means that either incremental marking is disabled or it is too
+// early to start incremental marking.
+// kSoftLimit means that incremental marking should be started soon.
+// kHardLimit means that incremental marking should be started immediately.
+Heap::IncrementalMarkingLimit Heap::ReachedIncrementalMarkingLimit() {
Hannes Payer (out of office) 2016/09/27 09:16:53: nit: maybe rename to IncrementalMarkingLimitReached
ulan 2016/09/27 17:06:41: Renamed the function.
+  if (!incremental_marking()->CanBeActivated() ||
+      PromotedSpaceSizeOfObjects() < IncrementalMarking::kActivationThreshold) {
+    // Incremental marking is disabled or it is too early to start.
+    return IncrementalMarkingLimit::kNoLimit;
+  }
+  if ((FLAG_stress_compaction && (gc_count_ & 1) != 0) ||
+      HighMemoryPressure()) {
+    // If there is high memory pressure or stress testing is enabled, then
+    // start marking immediately.
+    return IncrementalMarkingLimit::kHardLimit;
+  }
+  intptr_t old_generation_space_available = OldGenerationSpaceAvailable();
+  if (old_generation_space_available > new_space_->Capacity()) {
+    return IncrementalMarkingLimit::kNoLimit;
+  }
+  // We are close to the allocation limit.
+  // Choose between the hard and the soft limits.
+  if (old_generation_space_available <= 0 || ShouldOptimizeForMemoryUsage()) {
+    return IncrementalMarkingLimit::kHardLimit;
+  }
+  return IncrementalMarkingLimit::kSoftLimit;
+}

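In short, the new heuristic keys off how much old-generation headroom remains relative to new-space capacity: more than one new space of headroom means no limit yet, less than that means the soft limit, and exhausted headroom (or memory pressure, stress compaction, memory-saving mode) means the hard limit. A worked numeric sketch with invented sizes:

#include <cstdint>
#include <cstdio>

enum class IncrementalMarkingLimit { kNoLimit, kSoftLimit, kHardLimit };

// Simplified model of the threshold logic above; the pressure/stress and
// memory-usage special cases are folded into two booleans.
IncrementalMarkingLimit ClassifyLimit(int64_t old_space_available,
                                      int64_t new_space_capacity,
                                      bool high_pressure,
                                      bool optimize_for_memory) {
  if (high_pressure) return IncrementalMarkingLimit::kHardLimit;
  if (old_space_available > new_space_capacity)
    return IncrementalMarkingLimit::kNoLimit;
  if (old_space_available <= 0 || optimize_for_memory)
    return IncrementalMarkingLimit::kHardLimit;
  return IncrementalMarkingLimit::kSoftLimit;
}

int main() {
  const int64_t new_space = 16 << 20;  // 16 MB new space (invented)
  // 64 MB of headroom: well above one new space, so no limit yet.
  // 4 MB of headroom: below one new space, soft limit (schedule marking).
  // 0 bytes of headroom: hard limit (start marking right now).
  const int64_t headrooms[] = {64 << 20, 4 << 20, 0};
  for (int64_t h : headrooms) {
    IncrementalMarkingLimit l = ClassifyLimit(h, new_space, false, false);
    std::printf("headroom %lld MB -> %s\n", static_cast<long long>(h >> 20),
                l == IncrementalMarkingLimit::kNoLimit     ? "kNoLimit"
                : l == IncrementalMarkingLimit::kSoftLimit ? "kSoftLimit"
                                                           : "kHardLimit");
  }
  return 0;
}
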
 void Heap::EnableInlineAllocation() {
   if (!inline_allocation_disabled_) return;
   inline_allocation_disabled_ = false;

   // Update inline allocation limit for new space.
   new_space()->UpdateInlineAllocationLimit(0);
 }


(...skipping 1147 matching lines...)
 }


 // static
 int Heap::GetStaticVisitorIdForMap(Map* map) {
   return StaticVisitorBase::GetVisitorId(map);
 }

 }  // namespace internal
 }  // namespace v8