Chromium Code Reviews| Index: src/heap.cc |
| diff --git a/src/heap.cc b/src/heap.cc |
| index d5c40ad154ad0e8de49a1ba86de2a659c352dbb7..c37e38421b60362a55614cc8686128683340fe97 100644 |
| --- a/src/heap.cc |
| +++ b/src/heap.cc |
| @@ -517,6 +517,9 @@ void Heap::GarbageCollectionEpilogue() { |
| if (casted->DigestPretenuringFeedback()) { |
| if (casted->GetPretenureMode() == TENURED) { |
| tenure_decisions++; |
| + casted->dependent_code()->DeoptimizeDependentCodeGroup( |
| + isolate_, |
| + DependentCode::kAllocationSiteTenuringChangedGroup); |

Michael Starzinger
2013/12/02 15:12:54
IMHO this should be done as part of DigestPretenuringFeedback.
Hannes Payer (out of office)
2014/01/09 14:37:13
Done.
| } else { |
| dont_tenure_decisions++; |
| } |
| @@ -1072,13 +1075,6 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector, |
| PrintPID("Limited new space size due to high promotion rate: %d MB\n", |
| new_space_.InitialCapacity() / MB); |
| } |
| - // Support for global pre-tenuring uses the high promotion mode as a |
| - // heuristic indicator of whether to pretenure or not, we trigger |
| - // deoptimization here to take advantage of pre-tenuring as soon as |
| - // possible. |
| - if (FLAG_pretenuring) { |
| - isolate_->stack_guard()->FullDeopt(); |
| - } |
| } else if (new_space_high_promotion_mode_active_ && |
| IsStableOrDecreasingSurvivalTrend() && |
| IsLowSurvivalRate()) { |
| @@ -1090,11 +1086,6 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector, |
| PrintPID("Unlimited new space size due to low promotion rate: %d MB\n", |
| new_space_.MaximumCapacity() / MB); |
| } |
| - // Trigger deoptimization here to turn off pre-tenuring as soon as |
| - // possible. |
| - if (FLAG_pretenuring) { |
| - isolate_->stack_guard()->FullDeopt(); |
| - } |
| } |
| if (new_space_high_promotion_mode_active_ && |
| @@ -1184,6 +1175,8 @@ void Heap::MarkCompact(GCTracer* tracer) { |
| gc_state_ = MARK_COMPACT; |
| LOG(isolate_, ResourceEvent("markcompact", "begin")); |
| + uint64_t size_of_objects_before_gc = SizeOfObjects(); |
| + |
| mark_compact_collector_.Prepare(tracer); |
| ms_count_++; |
| @@ -1200,6 +1193,8 @@ void Heap::MarkCompact(GCTracer* tracer) { |
| isolate_->counters()->objs_since_last_full()->Set(0); |
| flush_monomorphic_ics_ = false; |
| + |
| + EvaluateLocalPretenuring(size_of_objects_before_gc); |
| } |
| @@ -1939,6 +1934,36 @@ void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer, |
| } |
| +void Heap::ClearAllAllocationSitesDependentCode() { |
| + Object* cur = allocation_sites_list(); |
| + while (cur->IsAllocationSite()) { |
| + AllocationSite* casted = AllocationSite::cast(cur); |
| + casted->ResetPretenureDecision(); |
| + cur = casted->weak_next(); |
| + } |
| +} |
| + |
| + |
| +void Heap::EvaluateLocalPretenuring(uint64_t size_of_objects_before_gc) { |
| + uint64_t size_of_objects_after_gc = SizeOfObjects(); |
| + double old_generation_survival_rate = |
| + (static_cast<double>(size_of_objects_after_gc) * 100) / |
| + static_cast<double>(size_of_objects_before_gc); |
| + |
| + if (old_generation_survival_rate < kOldSurvivalRateLowThreshold) { |
| + // Too many objects died in the old generation, pretenuring of wrong |
| + // allocation sites may be the cause for that. We have to deopt all |
| + // dependent code registered in the allocation sites to re-evaluate |
| + // our pretenuring decisions. |
| + ClearAllAllocationSitesDependentCode(); |
| + if (FLAG_trace_pretenuring) { |
| + PrintF("Deopt all allocation sites dependent code due to low survival " |
| + "rate in the old generation %f\n", old_generation_survival_rate); |
| + } |
| + } |
| +} |
| + |
| + |
| void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) { |
| DisallowHeapAllocation no_allocation; |