Index: src/heap/incremental-marking.cc
diff --git a/src/heap/incremental-marking.cc b/src/heap/incremental-marking.cc
index 57ed020228ab600222bdb25ca05018cec545462a..f585387dc3fa84d5601a39f02e91c868a808c018 100644
--- a/src/heap/incremental-marking.cc
+++ b/src/heap/incremental-marking.cc
@@ -30,7 +30,13 @@
       observer_(*this, kAllocatedThreshold),
       state_(STOPPED),
       is_compacting_(false),
+      steps_count_(0),
+      old_generation_space_available_at_start_of_incremental_(0),
+      old_generation_space_used_at_start_of_incremental_(0),
+      bytes_rescanned_(0),
       should_hurry_(false),
+      marking_speed_(0),
+      bytes_scanned_(0),
       allocated_(0),
       write_barriers_invoked_since_last_step_(0),
       idle_marking_delay_counter_(0),
@@ -75,11 +81,9 @@
   MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
   int counter = chunk->write_barrier_counter();
   if (counter < (MemoryChunk::kWriteBarrierCounterGranularity / 2)) {
-    marking->write_barriers_invoked_since_last_step_ =
-        Min(kMaxWriteBarrierCounter,
-            marking->write_barriers_invoked_since_last_step_ +
-                MemoryChunk::kWriteBarrierCounterGranularity -
-                chunk->write_barrier_counter());
+    marking->write_barriers_invoked_since_last_step_ +=
+        MemoryChunk::kWriteBarrierCounterGranularity -
+        chunk->write_barrier_counter();
     chunk->set_write_barrier_counter(
         MemoryChunk::kWriteBarrierCounterGranularity);
   }
@@ -463,6 +467,21 @@
 }
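+// Raise the marking speed to kFastMarking when a high promotion rate is
+// reported, so that marking keeps up with the extra objects surviving into
+// old space.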
+void IncrementalMarking::NotifyOfHighPromotionRate() {
+  if (IsMarking()) {
+    if (marking_speed_ < kFastMarking) {
+      if (FLAG_trace_gc) {
+        heap()->isolate()->PrintWithTimestamp(
+            "Increasing marking speed to %d "
+            "due to high promotion rate\n",
+            static_cast<int>(kFastMarking));
+      }
+      marking_speed_ = kFastMarking;
+    }
+  }
+}
+
+
 static void PatchIncrementalMarkingRecordWriteStubs(
     Heap* heap, RecordWriteStub::Mode mode) {
   UnseededNumberDictionary* stubs = heap->code_stubs();
@@ -1050,10 +1069,86 @@
     heap()->StartIncrementalMarking(Heap::kNoGCFlags, kNoGCCallbackFlags,
                                     "old space step");
   } else {
-    Step(allocated, GC_VIA_STACK_GUARD);
-  }
-}
-
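+    // Steps driven by old-space allocation are scaled up by
+    // kFastMarking / kInitialMarkingSpeed.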
+    Step(allocated * kFastMarking / kInitialMarkingSpeed, GC_VIA_STACK_GUARD);
+  }
+}
+
+
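+// Heuristically raise marking_speed_: periodically after a fixed number of
+// steps, when old-space headroom is nearly gone, when the heap has grown a
+// lot since marking started, or when promotion is outpacing the marker.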
+void IncrementalMarking::SpeedUp() {
+  bool speed_up = false;
+
+  if ((steps_count_ % kMarkingSpeedAccellerationInterval) == 0) {
+    if (FLAG_trace_incremental_marking) {
+      heap()->isolate()->PrintWithTimestamp(
+          "[IncrementalMarking] Speed up marking after %d steps\n",
+          static_cast<int>(kMarkingSpeedAccellerationInterval));
+    }
+    speed_up = true;
+  }
+
+  bool space_left_is_very_small =
+      (old_generation_space_available_at_start_of_incremental_ < 10 * MB);
+
+  bool only_1_nth_of_space_that_was_available_still_left =
+      (SpaceLeftInOldSpace() * (marking_speed_ + 1) <
+       old_generation_space_available_at_start_of_incremental_);
+
+  if (space_left_is_very_small ||
+      only_1_nth_of_space_that_was_available_still_left) {
+    if (FLAG_trace_incremental_marking)
+      heap()->isolate()->PrintWithTimestamp(
+          "[IncrementalMarking] Speed up marking because of low space left\n");
+    speed_up = true;
+  }
+
+  bool size_of_old_space_multiplied_by_n_during_marking =
+      (heap_->PromotedTotalSize() >
+       (marking_speed_ + 1) *
+           old_generation_space_used_at_start_of_incremental_);
+  if (size_of_old_space_multiplied_by_n_during_marking) {
+    speed_up = true;
+    if (FLAG_trace_incremental_marking) {
+      heap()->isolate()->PrintWithTimestamp(
+          "[IncrementalMarking] Speed up marking because of heap size "
+          "increase\n");
+    }
+  }
+
+  int64_t promoted_during_marking =
+      heap_->PromotedTotalSize() -
+      old_generation_space_used_at_start_of_incremental_;
+  intptr_t delay = marking_speed_ * MB;
+  intptr_t scavenge_slack = heap_->MaxSemiSpaceSize();
+
+  // We try to scan at least twice as fast as we are allocating.
+  if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) {
+    if (FLAG_trace_incremental_marking) {
+      heap()->isolate()->PrintWithTimestamp(
+          "[IncrementalMarking] Speed up marking because marker was not "
+          "keeping up\n");
+    }
+    speed_up = true;
+  }
+
+  if (speed_up) {
+    if (state_ != MARKING) {
+      if (FLAG_trace_incremental_marking) {
+        heap()->isolate()->PrintWithTimestamp(
+            "[IncrementalMarking] Postponing speeding up marking until marking "
+            "starts\n");
+      }
+    } else {
+      marking_speed_ += kMarkingSpeedAccelleration;
+      marking_speed_ = static_cast<int>(
+          Min(kMaxMarkingSpeed, static_cast<intptr_t>(marking_speed_ * 1.3)));
+      if (FLAG_trace_incremental_marking) {
+        heap()->isolate()->PrintWithTimestamp(
+            "[IncrementalMarking] Marking speed increased to %d\n",
+            marking_speed_);
+      }
+    }
+  }
+}
 void IncrementalMarking::FinalizeSweeping() {
   DCHECK(state_ == SWEEPING);
@@ -1063,6 +1158,7 @@
     heap_->mark_compact_collector()->EnsureSweepingCompleted();
   }
   if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
+    bytes_scanned_ = 0;
     StartMarking();
   }
 }
@@ -1100,23 +1196,21 @@
   TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL);
   double start = heap_->MonotonicallyIncreasingTimeInMs();
-  // Make sure that the step size is large enough to justify the overhead
-  // of interrupting the generated code to perform the step.
-  intptr_t min_bytes_to_process = GCIdleTimeHandler::EstimateMarkingStepSize(
-      kMinIncrementalStepDurationInMs,
-      heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
   // The marking speed is driven either by the allocation rate or by the rate
   // at which we are having to check the color of objects in the write
   // barrier.
   // It is possible for a tight non-allocating loop to run a lot of write
   // barriers before we get here and check them (marking can only take place
-  // on allocation).
-  intptr_t bytes_to_process = Max(
-      min_bytes_to_process, kBytesToMarkPerAllocatedByte * allocated_ +
-                                kBytesToMarkPerWriteBarrier *
-                                    write_barriers_invoked_since_last_step_);
+  // on allocation), so to reduce the lumpiness we don't use the write
+  // barriers invoked since last step directly to determine the amount of
+  // work to do.
+  intptr_t bytes_to_process =
+      marking_speed_ *
+      Max(allocated_, write_barriers_invoked_since_last_step_);
   allocated_ = 0;
   write_barriers_invoked_since_last_step_ = 0;
+
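+  // bytes_scanned_ accumulates the marking work issued so far; SpeedUp()
+  // compares it against the number of bytes promoted during marking.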
+  bytes_scanned_ += bytes_to_process;
   if (state_ == SWEEPING) {
     TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL_SWEEPING);
@@ -1166,6 +1260,12 @@
     }
   }
+  steps_count_++;
+
+  // Speed up marking if we are marking too slowly or if we are almost done
+  // with marking.
+  SpeedUp();
+
   double end = heap_->MonotonicallyIncreasingTimeInMs();
   double duration = (end - start);
   // Note that we report zero bytes here when sweeping was in progress or
@@ -1178,7 +1278,14 @@
 void IncrementalMarking::ResetStepCounters() {
-  allocated_ = 0;
+  steps_count_ = 0;
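+  // Snapshot the old-generation state at the start of incremental marking;
+  // SpeedUp() measures progress against these baselines.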
+  old_generation_space_available_at_start_of_incremental_ =
+      SpaceLeftInOldSpace();
+  old_generation_space_used_at_start_of_incremental_ =
+      heap_->PromotedTotalSize();
+  bytes_rescanned_ = 0;
+  marking_speed_ = kInitialMarkingSpeed;
+  bytes_scanned_ = 0;
   write_barriers_invoked_since_last_step_ = 0;
 }