Chromium Code Reviews

Unified Diff: src/heap.cc

Issue 2809032: Take survival rates of young objects into account when choosing old generation limits. (Closed)
Patch Set: throttle down (created 10 years, 6 months ago)
 // Copyright 2009 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 108 matching lines...)
 int Heap::gc_count_ = 0;
 
 GCTracer* Heap::tracer_ = NULL;
 
 int Heap::unflattened_strings_length_ = 0;
 
 int Heap::always_allocate_scope_depth_ = 0;
 int Heap::linear_allocation_scope_depth_ = 0;
 int Heap::contexts_disposed_ = 0;
 
+int Heap::young_survivors_after_last_gc_ = 0;
+int Heap::high_survival_rate_period_length_ = 0;
+int Heap::survival_rate_ = 0;
+Heap::SurvivalRateTrend Heap::previous_survival_rate_trend_ = Heap::STABLE;
+Heap::SurvivalRateTrend Heap::survival_rate_trend_ = Heap::STABLE;
+bool Heap::bumped_old_gen_limits_ = false;
+
 #ifdef DEBUG
 bool Heap::allocation_allowed_ = true;
 
 int Heap::allocation_timeout_ = 0;
 bool Heap::disallow_allocation_failure_ = false;
 #endif  // DEBUG
 
 int GCTracer::alive_after_last_gc_ = 0;
 double GCTracer::last_gc_end_timestamp_ = 0.0;
 int GCTracer::max_gc_pause_ = 0;
(...skipping 436 matching lines...)
                                         PageWatermarkValidity validity) {
   PageIterator it(space, PageIterator::PAGES_IN_USE);
   bool expected_value = (validity == ALL_VALID);
   while (it.has_next()) {
     Page* page = it.next();
     ASSERT(page->IsWatermarkValid() == expected_value);
   }
 }
 #endif
 
+void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
+  double survival_rate =
+      (static_cast<double>(young_survivors_after_last_gc_) * 100) /
+      start_new_space_size;
+
+  if (survival_rate > kYoungSurvivalRateThreshold) {
+    high_survival_rate_period_length_++;
+  } else {
+    high_survival_rate_period_length_ = 0;
+  }
+
+  double survival_rate_diff = survival_rate_ - survival_rate;
+
+  if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) {
+    set_survival_rate_trend(DECREASING);
+  } else if (survival_rate_diff < -kYoungSurvivalRateAllowedDeviation) {
+    set_survival_rate_trend(INCREASING);
+  } else {
+    set_survival_rate_trend(STABLE);
+  }
+
+  survival_rate_ = survival_rate;
+}
 
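Reviewer's note, not part of the patch: the sketch below restates the new rate and trend computation in isolation. The constants kYoungSurvivalRateThreshold and kYoungSurvivalRateAllowedDeviation are declared in heap.h and their values are not visible in this diff, so the numbers used here are placeholders. A rate that rises by more than the allowed deviation is classified as INCREASING, a drop of the same size as DECREASING, and anything in between as STABLE.

    #include <cstdio>

    enum SurvivalRateTrend { INCREASING, STABLE, DECREASING };

    // Placeholder thresholds, in percent; the real constants live in heap.h.
    static const double kYoungSurvivalRateThreshold = 90;
    static const double kYoungSurvivalRateAllowedDeviation = 10;

    static const char* kTrendNames[] = { "INCREASING", "STABLE", "DECREASING" };

    static SurvivalRateTrend ClassifyTrend(double previous_rate,
                                           double current_rate) {
      double diff = previous_rate - current_rate;
      if (diff > kYoungSurvivalRateAllowedDeviation) return DECREASING;
      if (diff < -kYoungSurvivalRateAllowedDeviation) return INCREASING;
      return STABLE;
    }

    int main() {
      // Example: 6 MB out of an 8 MB new space survived the last scavenge.
      double rate = (static_cast<double>(6 << 20) * 100) / (8 << 20);  // 75%
      // Previous rate was 40%, so the trend is classified as INCREASING.
      printf("rate = %.0f%%, trend = %s\n",
             rate, kTrendNames[ClassifyTrend(40.0, rate)]);
      return 0;
    }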
 void Heap::PerformGarbageCollection(AllocationSpace space,
                                     GarbageCollector collector,
                                     GCTracer* tracer) {
   VerifySymbolTable();
   if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) {
     ASSERT(!allocation_allowed_);
     GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
     global_gc_prologue_callback_();
   }
 
   GCType gc_type =
       collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
 
   for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
     if (gc_type & gc_prologue_callbacks_[i].gc_type) {
       gc_prologue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags);
     }
   }
 
   EnsureFromSpaceIsCommitted();
 
+  int start_new_space_size = Heap::new_space()->Size();
+
   if (collector == MARK_COMPACTOR) {
     if (FLAG_flush_code) {
       // Flush all potentially unused code.
       FlushCode();
     }
 
     // Perform mark-sweep with optional compaction.
     MarkCompact(tracer);
 
+    bool high_survival_rate_during_scavenges = IsHighSurvivalRate() &&
+        IsStableOrIncreasingSurvivalTrend();
+
+    UpdateSurvivalRateTrend(start_new_space_size);
+
     int old_gen_size = PromotedSpaceSize();
     old_gen_promotion_limit_ =
         old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3);
     old_gen_allocation_limit_ =
         old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2);
+
+    if (high_survival_rate_during_scavenges &&
+        IsStableOrIncreasingSurvivalTrend()) {
+      // Stable high survival rates of young objects both during partial and
+      // full collections indicate that the mutator is either building or
+      // modifying a structure with a long lifetime.
+      // In this case we aggressively raise the old generation memory limits
+      // to postpone the subsequent mark-sweep collection and thus trade
+      // memory space for mutation speed.
+      old_gen_promotion_limit_ *= 2;
+      old_gen_allocation_limit_ *= 2;
+      bumped_old_gen_limits_ = true;
+    }
+
     old_gen_exhausted_ = false;
   } else {
     tracer_ = tracer;
     Scavenge();
     tracer_ = NULL;
+
+    UpdateSurvivalRateTrend(start_new_space_size);
+
+    if (bumped_old_gen_limits_ &&
+        !IsHighSurvivalRate() &&
+        !IsIncreasingSurvivalTrend()) {
+      // We previously observed high survival rates in young space and bumped
+      // the old space allocation limits to trade space for speed, but
+      // survival rates are now dropping, which indicates that the mutator has
+      // finished updating the tenured data structure. We can therefore lower
+      // the old space limits again to guarantee an earlier full GC cycle and
+      // reduce the memory footprint.
+      old_gen_promotion_limit_ /= 2;
+      old_gen_allocation_limit_ /= 2;
+      bumped_old_gen_limits_ = false;
+    }
   }
 
   Counters::objs_since_last_young.Set(0);
 
   if (collector == MARK_COMPACTOR) {
     DisableAssertNoAllocation allow_allocation;
     GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
     GlobalHandles::PostGarbageCollectionProcessing();
   }
 
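Reviewer's note, not part of the patch: the effect of the limit adjustment added above is easier to see with concrete numbers. The sketch below reruns the arithmetic for a hypothetical 32 MB promoted old generation; the values chosen for kMinimumPromotionLimit and kMinimumAllocationLimit are placeholders, since the real constants are declared in heap.h and not shown in this diff. With the doubling applied, the next mark-sweep is postponed until roughly twice as much promotion and old-space allocation has occurred, which is the memory-for-speed trade described in the comment; the scavenge path halves the limits again once survival rates drop.

    #include <algorithm>
    #include <cstdio>

    int main() {
      // Placeholder minimums; the real constants are declared in heap.h.
      const int kMinimumPromotionLimit = 2 * 1024 * 1024;
      const int kMinimumAllocationLimit = 8 * 1024 * 1024;

      int old_gen_size = 32 * 1024 * 1024;  // hypothetical promoted space size

      int promotion_limit =
          old_gen_size + std::max(kMinimumPromotionLimit, old_gen_size / 3);
      int allocation_limit =
          old_gen_size + std::max(kMinimumAllocationLimit, old_gen_size / 2);

      // Mirror the new heuristic: double both limits while young survival
      // rates stay high and the trend is stable or increasing.
      bool high_survival_rate_during_scavenges = true;
      if (high_survival_rate_during_scavenges) {
        promotion_limit *= 2;   // ~85 MB instead of ~43 MB
        allocation_limit *= 2;  // 96 MB instead of 48 MB
      }
      printf("promotion limit: %d MB, allocation limit: %d MB\n",
             promotion_limit >> 20, allocation_limit >> 20);
      return 0;
    }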
(...skipping 4133 matching lines...)
 void ExternalStringTable::TearDown() {
   new_space_strings_.Free();
   old_space_strings_.Free();
 }
 
 
 List<Object*> ExternalStringTable::new_space_strings_;
 List<Object*> ExternalStringTable::old_space_strings_;
 
 } }  // namespace v8::internal
