OLD | NEW |
---|---|
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 729 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
740 return; | 740 return; |
741 } | 741 } |
742 | 742 |
743 allocated_ += allocated_bytes; | 743 allocated_ += allocated_bytes; |
744 | 744 |
745 if (allocated_ < kAllocatedThreshold) return; | 745 if (allocated_ < kAllocatedThreshold) return; |
746 | 746 |
747 if (state_ == MARKING && no_marking_scope_depth_ > 0) return; | 747 if (state_ == MARKING && no_marking_scope_depth_ > 0) return; |
748 | 748 |
749 intptr_t bytes_to_process = allocated_ * allocation_marking_factor_; | 749 intptr_t bytes_to_process = allocated_ * allocation_marking_factor_; |
750 bytes_scanned_ += bytes_to_process; | |
ulan
2011/11/07 17:24:50
Should the heuristics below be active in SWEEPING state?
Erik Corry
2011/11/08 10:29:34
Added a reset of this variable when we start marking.
| |
750 | 751 |
751 double start = 0; | 752 double start = 0; |
752 | 753 |
753 if (FLAG_trace_incremental_marking || FLAG_trace_gc) { | 754 if (FLAG_trace_incremental_marking || FLAG_trace_gc) { |
754 start = OS::TimeCurrentMillis(); | 755 start = OS::TimeCurrentMillis(); |
755 } | 756 } |
756 | 757 |
757 if (state_ == SWEEPING) { | 758 if (state_ == SWEEPING) { |
758 if (heap_->old_pointer_space()->AdvanceSweeper(bytes_to_process) && | 759 if (heap_->old_pointer_space()->AdvanceSweeper(bytes_to_process) && |
759 heap_->old_data_space()->AdvanceSweeper(bytes_to_process)) { | 760 heap_->old_data_space()->AdvanceSweeper(bytes_to_process)) { |
(...skipping 41 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
801 if (marking_deque_.IsEmpty()) MarkingComplete(); | 802 if (marking_deque_.IsEmpty()) MarkingComplete(); |
802 } | 803 } |
803 | 804 |
804 allocated_ = 0; | 805 allocated_ = 0; |
805 | 806 |
806 steps_count_++; | 807 steps_count_++; |
807 steps_count_since_last_gc_++; | 808 steps_count_since_last_gc_++; |
808 | 809 |
809 bool speed_up = false; | 810 bool speed_up = false; |
810 | 811 |
812 if ((steps_count_ % kAllocationMarkingFactorSpeedupInterval) == 0) { | |
813 if (FLAG_trace_gc) { | |
814 PrintF("Speed up marking after %d steps\n", | |
815 static_cast<int>(kAllocationMarkingFactorSpeedupInterval)); | |
816 } | |
817 speed_up = true; | |
818 } | |
819 | |
811 if (old_generation_space_available_at_start_of_incremental_ < 10 * MB || | 820 if (old_generation_space_available_at_start_of_incremental_ < 10 * MB || |
812 SpaceLeftInOldSpace() < | 821 SpaceLeftInOldSpace() * (allocation_marking_factor_ + 1) < |
813 old_generation_space_available_at_start_of_incremental_ >> 1) { | 822 old_generation_space_available_at_start_of_incremental_) { |
814 // Half of the space that was available is gone while we were | 823 // 1/n of the space that was available is gone while we were |
ulan
2011/11/07 17:24:50
I think the condition says that 1/n is left and (n-1)/n is gone.
Erik Corry
2011/11/08 10:29:34
Done.
| |
815 // incrementally marking. | 824 // incrementally marking. |
825 if (FLAG_trace_gc) PrintF("Speed up marking because of low space left\n"); | |
816 speed_up = true; | 826 speed_up = true; |
817 old_generation_space_available_at_start_of_incremental_ = | |
818 SpaceLeftInOldSpace(); | |
819 } | 827 } |
820 | 828 |
821 if (heap_->PromotedTotalSize() > | 829 if (heap_->PromotedTotalSize() > |
822 old_generation_space_used_at_start_of_incremental_ << 1) { | 830 (allocation_marking_factor_ + 1) * |
823 // Size of old space doubled while we were incrementally marking. | 831 old_generation_space_used_at_start_of_incremental_) { |
832 // Size of old space multiplied by n while we were incrementally marking. | |
824 speed_up = true; | 833 speed_up = true; |
825 old_generation_space_used_at_start_of_incremental_ = | 834 if (FLAG_trace_gc) { |
826 heap_->PromotedTotalSize(); | 835 PrintF("Speed up marking because of heap size increase\n"); |
836 } | |
827 } | 837 } |
828 | 838 |
829 if ((steps_count_ % kAllocationMarkingFactorSpeedupInterval) == 0 && | 839 // We try to scan at at least twice the speed that we are allocating. |
830 allocation_marking_factor_ < kMaxAllocationMarkingFactor) { | 840 if ((bytes_scanned_ >> 1) |
841 + heap_->MaxSemiSpaceSize() // A single scavenge cannot trigger this. | |
842 + allocation_marking_factor_ * MB < // Delay before upping again. | |
ulan
2011/11/07 17:24:50
Possible overflow in allocation_marking_factor_ * MB.
Erik Corry
2011/11/08 10:29:34
Reduced max allocation marking factor to 1000 to avoid overflow.
| |
843 heap_->PromotedTotalSize() | |
844 - old_generation_space_used_at_start_of_incremental_) { | |
845 if (FLAG_trace_gc) { | |
846 PrintF("Speed up marking because marker was not keeping up\n"); | |
847 } | |
831 speed_up = true; | 848 speed_up = true; |
832 } | 849 } |
833 | 850 |
834 if (speed_up) { | 851 if (speed_up) { |
835 allocation_marking_factor_ += kAllocationMarkingFactorSpeedup; | 852 allocation_marking_factor_ += kAllocationMarkingFactorSpeedup; |
836 allocation_marking_factor_ = | 853 allocation_marking_factor_ = |
837 static_cast<int>(allocation_marking_factor_ * 1.3); | 854 Min(kMaxAllocationMarkingFactor, |
855 static_cast<intptr_t>(allocation_marking_factor_ * 1.3)); | |
838 if (FLAG_trace_gc) { | 856 if (FLAG_trace_gc) { |
839 PrintF("Marking speed increased to %d\n", allocation_marking_factor_); | 857 PrintF("Marking speed increased to %d\n", allocation_marking_factor_); |
840 } | 858 } |
841 } | 859 } |
842 | 860 |
843 if (FLAG_trace_incremental_marking || FLAG_trace_gc) { | 861 if (FLAG_trace_incremental_marking || FLAG_trace_gc) { |
844 double end = OS::TimeCurrentMillis(); | 862 double end = OS::TimeCurrentMillis(); |
845 double delta = (end - start); | 863 double delta = (end - start); |
846 longest_step_ = Max(longest_step_, delta); | 864 longest_step_ = Max(longest_step_, delta); |
847 steps_took_ += delta; | 865 steps_took_ += delta; |
848 steps_took_since_last_gc_ += delta; | 866 steps_took_since_last_gc_ += delta; |
849 } | 867 } |
850 } | 868 } |
851 | 869 |
852 | 870 |
853 void IncrementalMarking::ResetStepCounters() { | 871 void IncrementalMarking::ResetStepCounters() { |
854 steps_count_ = 0; | 872 steps_count_ = 0; |
855 steps_took_ = 0; | 873 steps_took_ = 0; |
856 longest_step_ = 0.0; | 874 longest_step_ = 0.0; |
857 old_generation_space_available_at_start_of_incremental_ = | 875 old_generation_space_available_at_start_of_incremental_ = |
858 SpaceLeftInOldSpace(); | 876 SpaceLeftInOldSpace(); |
859 old_generation_space_used_at_start_of_incremental_ = | 877 old_generation_space_used_at_start_of_incremental_ = |
860 heap_->PromotedTotalSize(); | 878 heap_->PromotedTotalSize(); |
861 steps_count_since_last_gc_ = 0; | 879 steps_count_since_last_gc_ = 0; |
862 steps_took_since_last_gc_ = 0; | 880 steps_took_since_last_gc_ = 0; |
863 bytes_rescanned_ = 0; | 881 bytes_rescanned_ = 0; |
864 allocation_marking_factor_ = kInitialAllocationMarkingFactor; | 882 allocation_marking_factor_ = kInitialAllocationMarkingFactor; |
883 bytes_scanned_ = 0; | |
865 } | 884 } |
866 | 885 |
867 | 886 |
868 int64_t IncrementalMarking::SpaceLeftInOldSpace() { | 887 int64_t IncrementalMarking::SpaceLeftInOldSpace() { |
869 return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSize(); | 888 return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSize(); |
870 } | 889 } |
871 | 890 |
872 } } // namespace v8::internal | 891 } } // namespace v8::internal |
OLD | NEW |