Index: src/heap.cc
diff --git a/src/heap.cc b/src/heap.cc
index d5c40ad154ad0e8de49a1ba86de2a659c352dbb7..aa219f6d20d5a24f1f834447931ca457fe18cd76 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -517,6 +517,9 @@ void Heap::GarbageCollectionEpilogue() {
   if (casted->DigestPretenuringFeedback()) {
     if (casted->GetPretenureMode() == TENURED) {
       tenure_decisions++;
+      casted->dependent_code()->DeoptimizeDependentCodeGroup(
+          isolate_,
+          DependentCode::kAllocationSiteTenuringChangedGroup);
     } else {
       dont_tenure_decisions++;
     }
@@ -1072,13 +1075,6 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
     PrintPID("Limited new space size due to high promotion rate: %d MB\n",
              new_space_.InitialCapacity() / MB);
   }
-  // Support for global pre-tenuring uses the high promotion mode as a
-  // heuristic indicator of whether to pretenure or not, we trigger
-  // deoptimization here to take advantage of pre-tenuring as soon as
-  // possible.
-  if (FLAG_pretenuring) {
-    isolate_->stack_guard()->FullDeopt();
-  }
 } else if (new_space_high_promotion_mode_active_ &&
     IsStableOrDecreasingSurvivalTrend() &&
     IsLowSurvivalRate()) {
@@ -1090,11 +1086,6 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
     PrintPID("Unlimited new space size due to low promotion rate: %d MB\n",
              new_space_.MaximumCapacity() / MB);
   }
-  // Trigger deoptimization here to turn off pre-tenuring as soon as
-  // possible.
-  if (FLAG_pretenuring) {
-    isolate_->stack_guard()->FullDeopt();
-  }
 }
 if (new_space_high_promotion_mode_active_ &&
@@ -1184,6 +1175,8 @@ void Heap::MarkCompact(GCTracer* tracer) {
 gc_state_ = MARK_COMPACT;
 LOG(isolate_, ResourceEvent("markcompact", "begin"));
+  int64_t objects_before_gc = SizeOfObjects();
+
 mark_compact_collector_.Prepare(tracer);
 ms_count_++;
@@ -1200,6 +1193,8 @@ void Heap::MarkCompact(GCTracer* tracer) {
 isolate_->counters()->objs_since_last_full()->Set(0);
 flush_monomorphic_ics_ = false;
+
+  EvaluateLocalPretenuring(objects_before_gc);
 }
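
Aside: the bracketing added in this hunk amounts to recording the size of live objects before the mark-compact cycle, re-measuring it at the end via EvaluateLocalPretenuring(), and deriving a survival percentage from the two. A minimal stand-alone sketch of that arithmetic, with a stubbed SizeOfObjects() standing in for the real heap query (the stub value is purely illustrative):

  #include <cstdint>
  #include <cstdio>

  // Stub; in heap.cc this is Heap::SizeOfObjects(), queried once before
  // mark_compact_collector_.Prepare() and once again at the end of MarkCompact.
  int64_t SizeOfObjects() { return 64 * 1024 * 1024; }

  int main() {
    int64_t objects_before_gc = SizeOfObjects();
    // ... the mark-compact collection would run here ...
    int64_t objects_after_gc = SizeOfObjects();
    int64_t survival_rate = (objects_after_gc * 100) / objects_before_gc;
    printf("old generation survival rate: %d%%\n",
           static_cast<int>(survival_rate));
    return 0;
  }
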
@@ -1939,6 +1934,38 @@ void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer,
 }
+void Heap::DeoptAllAllocationSitesDependentCode() {
+  Object* cur = allocation_sites_list();
+  while (cur->IsAllocationSite()) {
+    AllocationSite* casted = AllocationSite::cast(cur);
+    casted->dependent_code()->DeoptimizeDependentCodeGroup(
+        isolate_,
+        DependentCode::kAllocationSiteTenuringChangedGroup);
+    cur = casted->weak_next();
+  }
+}
+
+
+void Heap::EvaluateLocalPretenuring(int64_t objects_before_gc) {
+  int64_t objects_after_gc = SizeOfObjects();
+  int64_t old_generation_survival_rate =
+      (objects_after_gc * 100) / objects_before_gc;
+
+  if (old_generation_survival_rate < kOldSurvivalRateLowThreshold) {
+    // Too many objects died in the old generation; pretenuring at the wrong
+    // allocation sites may be the cause. Deoptimize all dependent code
+    // registered in the allocation sites so that our pretenuring decisions
+    // are re-evaluated.
+    DeoptAllAllocationSitesDependentCode();
+    if (FLAG_trace_pretenuring) {
+      PrintF("Deopt all allocation sites dependent code due to low survival "
+             "rate in the old generation %d\n",
+             static_cast<int>(old_generation_survival_rate));
+    }
+  }
+}
+
+
 void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
   DisallowHeapAllocation no_allocation;
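
To summarize the new re-evaluation path in isolation: after each mark-compact, EvaluateLocalPretenuring() compares the object size before and after the cycle, and if the survival rate drops below kOldSurvivalRateLowThreshold it deoptimizes the kAllocationSiteTenuringChangedGroup dependent code of every site on the weak allocation_sites_list(). A minimal stand-alone sketch of that control flow, using hypothetical stand-ins (a plain Site struct, a bool flag instead of DeoptimizeDependentCodeGroup(), both sizes passed in explicitly, and an assumed threshold of 10 since the constant's value is not part of this patch):

  #include <cstdint>
  #include <cstdio>

  // Hypothetical stand-in for an AllocationSite chained through weak_next;
  // the bool replaces the site's DependentCode list.
  struct Site {
    bool deoptimized;
    Site* weak_next;
  };

  // Assumed value; Heap::kOldSurvivalRateLowThreshold is not shown here.
  const int64_t kOldSurvivalRateLowThreshold = 10;

  void DeoptAllAllocationSitesDependentCode(Site* list) {
    // Walk the weak list and deoptimize each site's dependent code
    // (here: just set a flag).
    for (Site* cur = list; cur != nullptr; cur = cur->weak_next) {
      cur->deoptimized = true;
    }
  }

  void EvaluateLocalPretenuring(Site* list,
                                int64_t objects_before_gc,
                                int64_t objects_after_gc) {
    int64_t survival_rate = (objects_after_gc * 100) / objects_before_gc;
    if (survival_rate < kOldSurvivalRateLowThreshold) {
      // Most of the old generation died, so the tenuring decisions baked
      // into optimized code are suspect; throw all of them away.
      DeoptAllAllocationSitesDependentCode(list);
    }
  }

  int main() {
    Site second = {false, nullptr};
    Site first = {false, &second};
    EvaluateLocalPretenuring(&first, 100 * 1024 * 1024, 5 * 1024 * 1024);
    printf("deoptimized: %d %d\n", first.deoptimized, second.deoptimized);
    return 0;
  }
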