@@ -1,10 +1,10 @@
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include "src/v8.h"
 
 #include "src/incremental-marking.h"
 
 #include "src/code-stubs.h"
 #include "src/compilation-cache.h"
(... skipping 835 matching lines ...)
@@ -846,129 +846,137 @@
   allocated_ += allocated_bytes;
 
   if (allocated_ < kAllocatedThreshold &&
       write_barriers_invoked_since_last_step_ <
          kWriteBarriersInvokedThreshold) {
     return;
   }
 
   if (state_ == MARKING && no_marking_scope_depth_ > 0) return;
 
-  // The marking speed is driven either by the allocation rate or by the rate
-  // at which we are having to check the color of objects in the write barrier.
-  // It is possible for a tight non-allocating loop to run a lot of write
-  // barriers before we get here and check them (marking can only take place on
-  // allocation), so to reduce the lumpiness we don't use the write barriers
-  // invoked since last step directly to determine the amount of work to do.
-  intptr_t bytes_to_process =
-      marking_speed_ * Max(allocated_, write_barriers_invoked_since_last_step_);
-  allocated_ = 0;
-  write_barriers_invoked_since_last_step_ = 0;
-
-  bytes_scanned_ += bytes_to_process;
-
-  double start = base::OS::TimeCurrentMillis();
-
-  if (state_ == SWEEPING) {
-    if (heap_->mark_compact_collector()->sweeping_in_progress() &&
-        heap_->mark_compact_collector()->IsSweepingCompleted()) {
-      heap_->mark_compact_collector()->EnsureSweepingCompleted();
-    }
-    if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
-      bytes_scanned_ = 0;
-      StartMarking(PREVENT_COMPACTION);
-    }
-  } else if (state_ == MARKING) {
-    ProcessMarkingDeque(bytes_to_process);
-    if (marking_deque_.IsEmpty()) MarkingComplete(action);
-  }
-
-  steps_count_++;
-
-  bool speed_up = false;
-
-  if ((steps_count_ % kMarkingSpeedAccellerationInterval) == 0) {
-    if (FLAG_trace_gc) {
-      PrintPID("Speed up marking after %d steps\n",
-               static_cast<int>(kMarkingSpeedAccellerationInterval));
-    }
-    speed_up = true;
-  }
-
-  bool space_left_is_very_small =
-      (old_generation_space_available_at_start_of_incremental_ < 10 * MB);
-
-  bool only_1_nth_of_space_that_was_available_still_left =
-      (SpaceLeftInOldSpace() * (marking_speed_ + 1) <
-          old_generation_space_available_at_start_of_incremental_);
-
-  if (space_left_is_very_small ||
-      only_1_nth_of_space_that_was_available_still_left) {
-    if (FLAG_trace_gc) PrintPID("Speed up marking because of low space left\n");
-    speed_up = true;
-  }
-
-  bool size_of_old_space_multiplied_by_n_during_marking =
-      (heap_->PromotedTotalSize() >
-       (marking_speed_ + 1) *
-           old_generation_space_used_at_start_of_incremental_);
-  if (size_of_old_space_multiplied_by_n_during_marking) {
-    speed_up = true;
-    if (FLAG_trace_gc) {
-      PrintPID("Speed up marking because of heap size increase\n");
-    }
-  }
-
-  int64_t promoted_during_marking = heap_->PromotedTotalSize()
-      - old_generation_space_used_at_start_of_incremental_;
-  intptr_t delay = marking_speed_ * MB;
-  intptr_t scavenge_slack = heap_->MaxSemiSpaceSize();
-
-  // We try to scan at at least twice the speed that we are allocating.
-  if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) {
-    if (FLAG_trace_gc) {
-      PrintPID("Speed up marking because marker was not keeping up\n");
-    }
-    speed_up = true;
-  }
-
-  if (speed_up) {
-    if (state_ != MARKING) {
-      if (FLAG_trace_gc) {
-        PrintPID("Postponing speeding up marking until marking starts\n");
-      }
-    } else {
-      marking_speed_ += kMarkingSpeedAccelleration;
-      marking_speed_ = static_cast<int>(
-          Min(kMaxMarkingSpeed,
-              static_cast<intptr_t>(marking_speed_ * 1.3)));
-      if (FLAG_trace_gc) {
-        PrintPID("Marking speed increased to %d\n", marking_speed_);
-      }
-    }
-  }
-
-  double end = base::OS::TimeCurrentMillis();
-  double delta = (end - start);
-  heap_->tracer()->AddIncrementalMarkingStep(delta);
-  heap_->AddMarkingTime(delta);
+  {
+    HistogramTimerScope incremental_marking_scope(
+        heap_->isolate()->counters()->gc_incremental_marking());
+    double start = base::OS::TimeCurrentMillis();
+
+    // The marking speed is driven either by the allocation rate or by the rate
+    // at which we are having to check the color of objects in the write
+    // barrier.
+    // It is possible for a tight non-allocating loop to run a lot of write
+    // barriers before we get here and check them (marking can only take place
+    // on
+    // allocation), so to reduce the lumpiness we don't use the write barriers
+    // invoked since last step directly to determine the amount of work to do.
+    intptr_t bytes_to_process =
+        marking_speed_ *
+        Max(allocated_, write_barriers_invoked_since_last_step_);
+    allocated_ = 0;
+    write_barriers_invoked_since_last_step_ = 0;
+
+    bytes_scanned_ += bytes_to_process;
+
+    if (state_ == SWEEPING) {
+      if (heap_->mark_compact_collector()->sweeping_in_progress() &&
+          heap_->mark_compact_collector()->IsSweepingCompleted()) {
+        heap_->mark_compact_collector()->EnsureSweepingCompleted();
+      }
+      if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
+        bytes_scanned_ = 0;
+        StartMarking(PREVENT_COMPACTION);
+      }
+    } else if (state_ == MARKING) {
+      ProcessMarkingDeque(bytes_to_process);
+      if (marking_deque_.IsEmpty()) MarkingComplete(action);
+    }
+
+    steps_count_++;
+
+    bool speed_up = false;
+
+    if ((steps_count_ % kMarkingSpeedAccellerationInterval) == 0) {
+      if (FLAG_trace_gc) {
+        PrintPID("Speed up marking after %d steps\n",
+                 static_cast<int>(kMarkingSpeedAccellerationInterval));
+      }
+      speed_up = true;
+    }
+
+    bool space_left_is_very_small =
+        (old_generation_space_available_at_start_of_incremental_ < 10 * MB);
+
+    bool only_1_nth_of_space_that_was_available_still_left =
+        (SpaceLeftInOldSpace() * (marking_speed_ + 1) <
+         old_generation_space_available_at_start_of_incremental_);
+
+    if (space_left_is_very_small ||
+        only_1_nth_of_space_that_was_available_still_left) {
+      if (FLAG_trace_gc)
+        PrintPID("Speed up marking because of low space left\n");
+      speed_up = true;
+    }
+
+    bool size_of_old_space_multiplied_by_n_during_marking =
+        (heap_->PromotedTotalSize() >
+         (marking_speed_ + 1) *
+             old_generation_space_used_at_start_of_incremental_);
+    if (size_of_old_space_multiplied_by_n_during_marking) {
+      speed_up = true;
+      if (FLAG_trace_gc) {
+        PrintPID("Speed up marking because of heap size increase\n");
+      }
+    }
+
+    int64_t promoted_during_marking =
+        heap_->PromotedTotalSize() -
+        old_generation_space_used_at_start_of_incremental_;
+    intptr_t delay = marking_speed_ * MB;
+    intptr_t scavenge_slack = heap_->MaxSemiSpaceSize();
+
+    // We try to scan at at least twice the speed that we are allocating.
+    if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) {
+      if (FLAG_trace_gc) {
+        PrintPID("Speed up marking because marker was not keeping up\n");
+      }
+      speed_up = true;
+    }
+
+    if (speed_up) {
+      if (state_ != MARKING) {
+        if (FLAG_trace_gc) {
+          PrintPID("Postponing speeding up marking until marking starts\n");
+        }
+      } else {
+        marking_speed_ += kMarkingSpeedAccelleration;
+        marking_speed_ = static_cast<int>(
+            Min(kMaxMarkingSpeed, static_cast<intptr_t>(marking_speed_ * 1.3)));
+        if (FLAG_trace_gc) {
+          PrintPID("Marking speed increased to %d\n", marking_speed_);
+        }
+      }
+    }
+
+    double end = base::OS::TimeCurrentMillis();
+    double delta = (end - start);
+    heap_->tracer()->AddIncrementalMarkingStep(delta);
+    heap_->AddMarkingTime(delta);
+  }
 }
 
 
 void IncrementalMarking::ResetStepCounters() {
   steps_count_ = 0;
   old_generation_space_available_at_start_of_incremental_ =
       SpaceLeftInOldSpace();
   old_generation_space_used_at_start_of_incremental_ =
       heap_->PromotedTotalSize();
   bytes_rescanned_ = 0;
   marking_speed_ = kInitialMarkingSpeed;
   bytes_scanned_ = 0;
   write_barriers_invoked_since_last_step_ = 0;
 }
 
 
 int64_t IncrementalMarking::SpaceLeftInOldSpace() {
   return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSizeOfObjects();
 }
 
 } } // namespace v8::internal
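What this hunk actually changes: the step body after the early-return checks is wrapped in a new block guarded by a HistogramTimerScope on the isolate's gc_incremental_marking counter, the `double start` timestamp moves to the top of that block, and everything inside is re-indented and re-wrapped; the marking logic itself is untouched. Below is a minimal self-contained sketch of the RAII timing pattern this relies on. The Histogram and ScopedHistogramTimer names are hypothetical stand-ins for illustration, not V8's actual HistogramTimerScope/Counters API.

#include <chrono>
#include <cstdio>

class Histogram {
 public:
  // Stand-in for a real histogram counter: just prints the sample.
  void AddSample(double ms) { std::printf("sample: %.3f ms\n", ms); }
};

class ScopedHistogramTimer {
 public:
  explicit ScopedHistogramTimer(Histogram* histogram)
      : histogram_(histogram), start_(std::chrono::steady_clock::now()) {}

  // The destructor runs on every exit path from the enclosing scope,
  // so early returns and exceptions are timed too.
  ~ScopedHistogramTimer() {
    std::chrono::duration<double, std::milli> delta =
        std::chrono::steady_clock::now() - start_;
    histogram_->AddSample(delta.count());
  }

 private:
  Histogram* histogram_;
  std::chrono::steady_clock::time_point start_;
};

int main() {
  Histogram gc_incremental_marking;
  {
    ScopedHistogramTimer timer(&gc_incremental_marking);
    // ... the marking step would run here ...
  }  // timer destroyed here: elapsed time is recorded in the histogram
  return 0;
}

Because the sample is taken in the destructor, every way out of the scope is measured, which is why the CL wraps the whole step body in one block rather than bracketing individual paths by hand.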
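The speed-up machinery at the bottom of the hunk (only re-indented by this CL) grows marking_speed_ roughly geometrically once any trigger fires: add kMarkingSpeedAccelleration, scale by 1.3, clamp to kMaxMarkingSpeed; the per-step budget then scales linearly with it via marking_speed_ * Max(allocated_, write_barriers_invoked_since_last_step_). A standalone sketch of that update rule follows; the constant values are assumptions for illustration, since their definitions live in the header and are outside this hunk.

#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
  // Assumed values -- the real constants are defined in
  // incremental-marking.h, which this hunk does not show.
  const intptr_t kMarkingSpeedAccelleration = 10;
  const intptr_t kMaxMarkingSpeed = 1000;
  int marking_speed = 1;  // assumed initial speed

  // Apply the speed-up rule a few times to show the growth pattern:
  // additive bump, multiplicative scaling, then a hard cap.
  for (int i = 0; i < 5; i++) {
    marking_speed += kMarkingSpeedAccelleration;
    marking_speed = static_cast<int>(std::min(
        kMaxMarkingSpeed, static_cast<intptr_t>(marking_speed * 1.3)));
    std::printf("after speed-up %d: marking_speed = %d\n", i + 1,
                marking_speed);
  }
  return 0;
}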