Index: src/heap/gc-idle-time-handler.cc |
diff --git a/src/heap/gc-idle-time-handler.cc b/src/heap/gc-idle-time-handler.cc |
index 7c74dcb3dac79d1068e2f26ca3ed7716a5227cdc..414f697a03c97ef46a8c781176fd418e9c6194bf 100644 |
--- a/src/heap/gc-idle-time-handler.cc |
+++ b/src/heap/gc-idle-time-handler.cc |
@@ -74,7 +74,7 @@ size_t GCIdleTimeHandler::EstimateMarkCompactTime( |
size_t GCIdleTimeHandler::EstimateScavengeTime( |
size_t new_space_size, size_t scavenge_speed_in_bytes_per_ms) { |
if (scavenge_speed_in_bytes_per_ms == 0) { |
- scavenge_speed_in_bytes_per_ms = kInitialConservativeScavengeSpeed; |
+ return GCIdleTimeHandler::kLargeIdleTime; |
} |
return new_space_size / scavenge_speed_in_bytes_per_ms; |
} |
@@ -92,9 +92,51 @@ bool GCIdleTimeHandler::ScavangeMayHappenSoon( |
} |
+bool GCIdleTimeHandler::DoEarlyScavenge( |
+ size_t idle_time_in_ms, size_t new_space_size, |
+ size_t available_new_space_memory, size_t scavenger_speed_in_bytes_per_ms) { |
+ // For small idle times, we conservatively decide not to do a Scavenge. |
+ if (idle_time_in_ms < kLargeIdleTime && |
+ idle_time_in_ms > kMaxFrameRenderingIdleTime) { |
jochen (gone - plz use gerrit)
2014/09/18 18:46:28
that is 16 < idle_time_in_ms < 10, which is always false
|
+ return false; |
+ } |
+ |
+ size_t currently_allocated = new_space_size - available_new_space_memory; |
+ size_t current_scavenging_estimate_in_ms = EstimateScavengeTime( |
+ currently_allocated, scavenger_speed_in_bytes_per_ms); |
+ |
+ // We just force a Scavenge if it may take really long. |
+ if (current_scavenging_estimate_in_ms < kForceScavengeThreshold) return false; |
+ |
+ if (idle_time_in_ms < current_scavenging_estimate_in_ms) return false; |
jochen (gone - plz use gerrit)
2014/09/18 18:46:28
so basically, we force a scavenge if it would take longer than the force-scavenge threshold but still fits into the current idle time?
|
+ return true; |
+} |
+ |
+ |
+bool GCIdleTimeHandler::DoScavenge( |
+ size_t idle_time_in_ms, size_t new_space_size, |
+ size_t available_new_space_memory, size_t scavenge_speed_in_bytes_per_ms, |
+ size_t new_space_allocation_throughput_in_bytes_per_ms) { |
+ // Do not invoke scavenger for large idle notification requests. |
+ if (idle_time_in_ms > kMaxFrameRenderingIdleTime) return false; |
+ |
+ // We may force an early Scavenge if we think that the next Scavenge may take |
+ // long or if we think that a Scavenge may happen soon. |
+ return DoEarlyScavenge(idle_time_in_ms, new_space_size, |
ulan
2014/09/19 10:47:47
As discussed offline, we can probably unify these two functions.
|
+ available_new_space_memory, |
+ scavenge_speed_in_bytes_per_ms) || |
+ (ScavangeMayHappenSoon( |
+ available_new_space_memory, |
+ new_space_allocation_throughput_in_bytes_per_ms) && |
+ idle_time_in_ms >= |
+ EstimateScavengeTime(new_space_size, |
+ scavenge_speed_in_bytes_per_ms)); |
+} |
+ |
+ |
// The following logic is implemented by the controller: |
-// (1) If the new space is almost full and we can effort a Scavenge, then a |
-// Scavenge is performed. |
+// (1) If the new space is almost full and we can afford a Scavenge or if the |
+// next Scavenge will very likely take long, then a Scavenge is performed. |
// (2) If there is currently no MarkCompact idle round going on, we start a |
// new idle round if enough garbage was created or we received a context |
// disposal event. Otherwise we do not perform garbage collection to keep |
@@ -110,14 +152,13 @@ bool GCIdleTimeHandler::ScavangeMayHappenSoon( |
// that this currently may trigger a full garbage collection. |
GCIdleTimeAction GCIdleTimeHandler::Compute(size_t idle_time_in_ms, |
HeapState heap_state) { |
- if (ScavangeMayHappenSoon( |
- heap_state.available_new_space_memory, |
- heap_state.new_space_allocation_throughput_in_bytes_per_ms) && |
- idle_time_in_ms >= |
- EstimateScavengeTime(heap_state.new_space_capacity, |
- heap_state.scavenge_speed_in_bytes_per_ms)) { |
+ if (DoScavenge(idle_time_in_ms, heap_state.new_space_capacity, |
+ heap_state.available_new_space_memory, |
+ heap_state.scavenge_speed_in_bytes_per_ms, |
+ heap_state.new_space_allocation_throughput_in_bytes_per_ms)) { |
return GCIdleTimeAction::Scavenge(); |
} |
+ |
if (IsMarkCompactIdleRoundFinished()) { |
if (EnoughGarbageSinceLastIdleRound() || heap_state.contexts_disposed > 0) { |
StartIdleRound(); |