Index: src/heap.cc
diff --git a/src/heap.cc b/src/heap.cc
index f1ec56ce5a862ad7a6ae811f1391408335db802c..d0dbdb1eab2fcdad0c88ab0a987bb245bd0345d4 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -126,6 +126,14 @@ int Heap::always_allocate_scope_depth_ = 0;
 int Heap::linear_allocation_scope_depth_ = 0;
 int Heap::contexts_disposed_ = 0;

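+// State for tracking the young generation survival rate across GC cycles.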
+int Heap::young_survivors_after_last_gc_ = 0;
+int Heap::high_survival_rate_period_length_ = 0;
+double Heap::survival_rate_ = 0;
+Heap::SurvivalRateTrend Heap::previous_survival_rate_trend_ = Heap::STABLE;
+Heap::SurvivalRateTrend Heap::survival_rate_trend_ = Heap::STABLE;
+bool Heap::bumped_old_gen_limits_ = false;
+
 #ifdef DEBUG
 bool Heap::allocation_allowed_ = true;

@@ -582,6 +590,33 @@ static void VerifyPageWatermarkValidity(PagedSpace* space,
 }
 #endif

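+// Computes the percentage of new space that survived the last collection
+// and classifies the change from the previous cycle as INCREASING,
+// DECREASING or STABLE, depending on whether the rate moved by more than
+// kYoungSurvivalRateAllowedDeviation percentage points.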
+void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
+  double survival_rate =
+      (static_cast<double>(young_survivors_after_last_gc_) * 100) /
+      start_new_space_size;
+
+  if (survival_rate > kYoungSurvivalRateThreshold) {
+    high_survival_rate_period_length_++;
+  } else {
+    high_survival_rate_period_length_ = 0;
+  }
+
+  double survival_rate_diff = survival_rate_ - survival_rate;
+
+  if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) {
+    set_survival_rate_trend(DECREASING);
+  } else if (survival_rate_diff < -kYoungSurvivalRateAllowedDeviation) {
+    set_survival_rate_trend(INCREASING);
+  } else {
+    set_survival_rate_trend(STABLE);
+  }
+
+  survival_rate_ = survival_rate;
+}

 void Heap::PerformGarbageCollection(AllocationSpace space,
                                     GarbageCollector collector,
@@ -604,6 +639,10 @@ void Heap::PerformGarbageCollection(AllocationSpace space,
   EnsureFromSpaceIsCommitted();

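+  // Sample the new space size before the collection runs; the survival
+  // rate is computed against this pre-GC volume.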
+  int start_new_space_size = Heap::new_space()->Size();
+
   if (collector == MARK_COMPACTOR) {
     if (FLAG_flush_code) {
       // Flush all potentially unused code.
@@ -613,16 +652,60 @@ void Heap::PerformGarbageCollection(AllocationSpace space,
     // Perform mark-sweep with optional compaction.
     MarkCompact(tracer);

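+    // Capture the survival state accumulated by the preceding scavenges
+    // before UpdateSurvivalRateTrend below replaces it with this full
+    // collection's data.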
+    bool high_survival_rate_during_scavenges = IsHighSurvivalRate() &&
+        IsStableOrIncreasingSurvivalTrend();
+
+    UpdateSurvivalRateTrend(start_new_space_size);
+
     int old_gen_size = PromotedSpaceSize();
     old_gen_promotion_limit_ =
         old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3);
     old_gen_allocation_limit_ =
         old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2);
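+    // E.g. a 30 MB old generation yields a 40 MB promotion limit and a
+    // 45 MB allocation limit, assuming the minimum constants stay below
+    // 10 MB and 15 MB respectively.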
+
+    if (high_survival_rate_during_scavenges &&
+        IsStableOrIncreasingSurvivalTrend()) {
+      // Stable high survival rates of young objects both during partial and
+      // full collections indicate that the mutator is either building or
+      // modifying a structure with a long lifetime.
+      // In this case we aggressively raise the old generation memory limits
+      // to postpone the next mark-sweep collection and thus trade memory
+      // for mutation speed.
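+      // With the example limits above, this doubles them to 80 MB and
+      // 90 MB.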
+      old_gen_promotion_limit_ *= 2;
+      old_gen_allocation_limit_ *= 2;
+      bumped_old_gen_limits_ = true;
+    }
+
     old_gen_exhausted_ = false;
   } else {
     tracer_ = tracer;
     Scavenge();
     tracer_ = NULL;
+
+    UpdateSurvivalRateTrend(start_new_space_size);
+
+    if (bumped_old_gen_limits_ &&
+        !IsHighSurvivalRate() &&
+        !IsIncreasingSurvivalTrend()) {
+      // We previously observed high survival rates in the young space and
+      // bumped the old space allocation limits to trade space for speed.
+      // Survival rates are dropping now, indicating that the mutator has
+      // finished updating the tenured structure, so we lower the limits
+      // again to guarantee an earlier full GC and reduce memory footprint.
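+      // Halving undoes the doubling above: the example 80 MB and 90 MB
+      // limits drop back to 40 MB and 45 MB.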
+      old_gen_promotion_limit_ /= 2;
+      old_gen_allocation_limit_ /= 2;
+      bumped_old_gen_limits_ = false;
+    }
   }

   Counters::objs_since_last_young.Set(0);