Chromium Code Reviews

Unified diff: src/heap/heap.cc

Issue 2310143002: [heap] Introduce enum of garbage collection reasons. (Closed)
Patch Set: rebase (created 4 years, 3 months ago)
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "src/heap/heap.h"

 #include "src/accessors.h"
 #include "src/api.h"
 #include "src/ast/context-slot-cache.h"
 #include "src/base/bits.h"
(...skipping 772 matching lines...)
 };


 void Heap::HandleGCRequest() {
   if (HighMemoryPressure()) {
     incremental_marking()->reset_request_type();
     CheckMemoryPressure();
   } else if (incremental_marking()->request_type() ==
              IncrementalMarking::COMPLETE_MARKING) {
     incremental_marking()->reset_request_type();
-    CollectAllGarbage(current_gc_flags_, "GC interrupt",
-                      current_gc_callback_flags_);
+    CollectAllGarbage(current_gc_flags_,
+                      GarbageCollectionReason::kFinalizeMarkingViaStackGuard,
+                      current_gc_callback_flags_);
   } else if (incremental_marking()->request_type() ==
                  IncrementalMarking::FINALIZATION &&
              incremental_marking()->IsMarking() &&
              !incremental_marking()->finalize_marking_completed()) {
     incremental_marking()->reset_request_type();
-    FinalizeIncrementalMarking("GC interrupt: finalize incremental marking");
+    FinalizeIncrementalMarking(
+        GarbageCollectionReason::kFinalizeMarkingViaStackGuard);
   }
 }
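Note: the core change of this CL is visible in the hunk above: free-form const char* reason strings become a typed GarbageCollectionReason, so each call site names a concrete enumerator. A minimal sketch of what that enum presumably looks like, reconstructed from the GarbageCollectionReasonToString() switch added near the bottom of this file; the authoritative declaration lives in src/heap/heap.h and its exact ordering and values are not shown in this diff:

// Sketch only, inferred from GarbageCollectionReasonToString() below.
enum class GarbageCollectionReason {
  kAllocationFailure,
  kAllocationLimit,
  kContextDisposal,
  kCountersExtension,
  kDebugger,
  kDeserializer,
  kExternalMemoryPressure,
  kFinalizeMarkingViaStackGuard,
  kFinalizeMarkingViaTask,
  kFullHashtable,
  kHeapProfiler,
  kIdleTask,
  kLastResort,
  kLowMemoryNotification,
  kMakeHeapIterable,
  kMemoryPressure,
  kMemoryReducer,
  kRuntime,
  kSamplingProfiler,
  kSnapshotCreator,
  kTesting,
  kUnknown
};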


 void Heap::ScheduleIdleScavengeIfNeeded(int bytes_allocated) {
   scavenge_job_->ScheduleIdleTaskIfNeeded(this, bytes_allocated);
 }

-
-void Heap::FinalizeIncrementalMarking(const char* gc_reason) {
+void Heap::FinalizeIncrementalMarking(GarbageCollectionReason gc_reason) {
   if (FLAG_trace_incremental_marking) {
-    isolate()->PrintWithTimestamp("[IncrementalMarking] (%s).\n", gc_reason);
+    isolate()->PrintWithTimestamp(
+        "[IncrementalMarking] (%s).\n",
+        Heap::GarbageCollectionReasonToString(gc_reason));
   }

   HistogramTimerScope incremental_marking_scope(
       isolate()->counters()->gc_incremental_marking_finalize());
   TRACE_EVENT0("v8", "V8.GCIncrementalMarkingFinalize");
   TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_FINALIZE);

   {
(...skipping 29 matching lines...)
         return isolate_->counters()->gc_finalize_reduce_memory();
       } else {
         return isolate_->counters()->gc_finalize();
       }
     } else {
       return isolate_->counters()->gc_compactor();
     }
   }
 }

-void Heap::CollectAllGarbage(int flags, const char* gc_reason,
+void Heap::CollectAllGarbage(int flags, GarbageCollectionReason gc_reason,
                              const v8::GCCallbackFlags gc_callback_flags) {
   // Since we are ignoring the return value, the exact choice of space does
   // not matter, so long as we do not specify NEW_SPACE, which would not
   // cause a full GC.
   set_current_gc_flags(flags);
   CollectGarbage(OLD_SPACE, gc_reason, gc_callback_flags);
   set_current_gc_flags(kNoGCFlags);
 }

-
-void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
+void Heap::CollectAllAvailableGarbage(GarbageCollectionReason gc_reason) {
   // Since we are ignoring the return value, the exact choice of space does
   // not matter, so long as we do not specify NEW_SPACE, which would not
   // cause a full GC.
   // A major GC would invoke weak handle callbacks on weakly reachable
   // handles, but won't collect weakly reachable objects until the next
   // major GC. Therefore, if we collect aggressively and a weak handle
   // callback has been invoked, we rerun major GC to release objects which
   // become garbage.
   // Note: as weak callbacks can execute arbitrary code, we cannot
   // hope that eventually there will be no weak callback invocations.
(...skipping 13 matching lines...)
                        v8::kGCCallbackFlagCollectAllAvailableGarbage) &&
         attempt + 1 >= kMinNumberOfAttempts) {
       break;
     }
   }
   set_current_gc_flags(kNoGCFlags);
   new_space_->Shrink();
   UncommitFromSpace();
 }
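The comment block above explains why a single major GC is not enough here: weak handle callbacks that run during one collection can themselves produce new garbage, so the collection is retried a bounded number of times. A self-contained sketch of that retry pattern follows; the helper function and the constant values are hypothetical stand-ins, not the elided V8 lines:

#include <cstdio>

// Stand-in for Heap::CollectGarbage(); returns whether another pass is
// likely to collect more memory. Hypothetical, for illustration only.
static bool PerformMajorGC() { return false; }

// Illustrative retry loop: repeat major GCs until a pass stops freeing
// memory, bounded by a fixed attempt budget.
static void CollectAllAvailableGarbageSketch() {
  const int kMaxNumberOfAttempts = 7;  // illustrative upper bound on passes
  const int kMinNumberOfAttempts = 2;  // weak callbacks get at least one rerun
  for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
    if (!PerformMajorGC() && attempt + 1 >= kMinNumberOfAttempts) {
      break;
    }
  }
}

int main() {
  CollectAllAvailableGarbageSketch();
  std::printf("done\n");
  return 0;
}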

-
-void Heap::ReportExternalMemoryPressure(const char* gc_reason) {
+void Heap::ReportExternalMemoryPressure() {
   if (external_memory_ >
       (external_memory_at_last_mark_compact_ + external_memory_hard_limit())) {
     CollectAllGarbage(
-        kReduceMemoryFootprintMask | kFinalizeIncrementalMarkingMask, gc_reason,
+        kReduceMemoryFootprintMask | kFinalizeIncrementalMarkingMask,
+        GarbageCollectionReason::kExternalMemoryPressure,
         static_cast<GCCallbackFlags>(kGCCallbackFlagCollectAllAvailableGarbage |
                                      kGCCallbackFlagCollectAllExternalMemory));
     return;
   }
   if (incremental_marking()->IsStopped()) {
     if (incremental_marking()->CanBeActivated()) {
       StartIncrementalMarking(
-          i::Heap::kNoGCFlags,
+          i::Heap::kNoGCFlags, GarbageCollectionReason::kExternalMemoryPressure,
           static_cast<GCCallbackFlags>(
               kGCCallbackFlagSynchronousPhantomCallbackProcessing |
-              kGCCallbackFlagCollectAllExternalMemory),
-          gc_reason);
+              kGCCallbackFlagCollectAllExternalMemory));
     } else {
-      CollectAllGarbage(i::Heap::kNoGCFlags, gc_reason,
+      CollectAllGarbage(i::Heap::kNoGCFlags,
+                        GarbageCollectionReason::kExternalMemoryPressure,
                         kGCCallbackFlagSynchronousPhantomCallbackProcessing);
     }
   } else {
     // Incremental marking is turned on and has already been started.
     const double pressure =
         static_cast<double>(external_memory_ -
                             external_memory_at_last_mark_compact_ -
                             kExternalAllocationSoftLimit) /
         external_memory_hard_limit();
     DCHECK_GE(1, pressure);
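The pressure value above scales the incremental marking response by how far external allocation has overshot the soft limit, relative to the hard limit; the DCHECK asserts it stays at or below 1. A worked example with assumed limits (the real soft and hard limits are V8 heap configuration constants not shown in this hunk):

#include <cstdio>

int main() {
  const double soft_limit_mb = 64.0;        // assumed value
  const double hard_limit_mb = 128.0;       // assumed value
  const double external_growth_mb = 160.0;  // MB allocated since last mark-compact
  const double pressure = (external_growth_mb - soft_limit_mb) / hard_limit_mb;
  std::printf("pressure = %.2f\n", pressure);  // prints 0.75
  return 0;
}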
(...skipping 13 matching lines...)
   // may be uninitialized memory behind top. We fill the remainder of the page
   // with a filler.
   Address to_top = new_space_->top();
   Page* page = Page::FromAddress(to_top - kPointerSize);
   if (page->Contains(to_top)) {
     int remaining_in_page = static_cast<int>(page->area_end() - to_top);
     CreateFillerObjectAt(to_top, remaining_in_page, ClearRecordedSlots::kNo);
   }
 }

-
-bool Heap::CollectGarbage(GarbageCollector collector, const char* gc_reason,
+bool Heap::CollectGarbage(GarbageCollector collector,
+                          GarbageCollectionReason gc_reason,
                           const char* collector_reason,
                           const v8::GCCallbackFlags gc_callback_flags) {
   // The VM is in the GC state until exiting this function.
   VMState<GC> state(isolate_);

 #ifdef DEBUG
   // Reset the allocation timeout to the GC interval, but make sure to
   // allow at least a few allocations after a collection. The reason
   // for this is that we have a lot of allocation sequences and we
   // assume that a garbage collection will allow the subsequent
(...skipping 78 matching lines...)

   if (collector == MARK_COMPACTOR &&
       (gc_callback_flags & (kGCCallbackFlagForced |
                             kGCCallbackFlagCollectAllAvailableGarbage)) != 0) {
     isolate()->CountUsage(v8::Isolate::kForcedGC);
   }

   // Start incremental marking for the next cycle. The heap snapshot
   // generator needs incremental marking to stay off after it aborted.
   if (!ShouldAbortIncrementalMarking()) {
-    StartIncrementalMarkingIfNeeded(kNoGCFlags, kNoGCCallbackFlags,
-                                    "GC epilogue");
+    StartIncrementalMarkingIfAllocationLimitIsReached(kNoGCFlags,
+                                                      kNoGCCallbackFlags);
   }

   return next_gc_likely_to_collect_more;
 }


 int Heap::NotifyContextDisposed(bool dependant_context) {
   if (!dependant_context) {
     tracer()->ResetSurvivalEvents();
     old_generation_size_configured_ = false;
     MemoryReducer::Event event;
     event.type = MemoryReducer::kPossibleGarbage;
     event.time_ms = MonotonicallyIncreasingTimeInMs();
     memory_reducer_->NotifyPossibleGarbage(event);
   }
   if (isolate()->concurrent_recompilation_enabled()) {
     // Flush the queued recompilation tasks.
     isolate()->optimizing_compile_dispatcher()->Flush();
   }
   AgeInlineCaches();
   number_of_disposed_maps_ = retained_maps()->Length();
   tracer()->AddContextDisposalTime(MonotonicallyIncreasingTimeInMs());
   return ++contexts_disposed_;
 }

-
 void Heap::StartIncrementalMarking(int gc_flags,
-                                   const GCCallbackFlags gc_callback_flags,
-                                   const char* reason) {
+                                   GarbageCollectionReason gc_reason,
+                                   GCCallbackFlags gc_callback_flags) {
   DCHECK(incremental_marking()->IsStopped());
   set_current_gc_flags(gc_flags);
   current_gc_callback_flags_ = gc_callback_flags;
-  incremental_marking()->Start(reason);
+  incremental_marking()->Start(gc_reason);
 }

-void Heap::StartIncrementalMarkingIfNeeded(
-    int gc_flags, const GCCallbackFlags gc_callback_flags, const char* reason) {
+void Heap::StartIncrementalMarkingIfAllocationLimitIsReached(
+    int gc_flags, const GCCallbackFlags gc_callback_flags) {
   if (incremental_marking()->IsStopped() &&
       incremental_marking()->ShouldActivateEvenWithoutIdleNotification()) {
-    StartIncrementalMarking(gc_flags, gc_callback_flags, reason);
+    StartIncrementalMarking(gc_flags, GarbageCollectionReason::kAllocationLimit,
+                            gc_callback_flags);
   }
 }

-void Heap::StartIdleIncrementalMarking() {
+void Heap::StartIdleIncrementalMarking(GarbageCollectionReason gc_reason) {
   gc_idle_time_handler_->ResetNoProgressCounter();
-  StartIncrementalMarking(kReduceMemoryFootprintMask, kNoGCCallbackFlags,
-                          "idle");
+  StartIncrementalMarking(kReduceMemoryFootprintMask, gc_reason,
+                          kNoGCCallbackFlags);
 }
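StartIdleIncrementalMarking() now takes the reason from its caller instead of hard-coding "idle". A hedged sketch of what a migrated caller might look like; the function below is hypothetical, and real callers live outside this file:

// Hypothetical glue code, for illustration only (within the V8 tree).
#include "src/heap/heap.h"  // declares Heap and GarbageCollectionReason

void StartMarkingFromIdleTask(v8::internal::Heap* heap) {
  heap->StartIdleIncrementalMarking(
      v8::internal::GarbageCollectionReason::kIdleTask);
}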


 void Heap::MoveElements(FixedArray* array, int dst_index, int src_index,
                         int len) {
   if (len == 0) return;

   DCHECK(array->map() != fixed_cow_array_map());
   Object** dst_objects = array->data_start() + dst_index;
   MemMove(dst_objects, array->data_start() + src_index, len * kPointerSize);
(...skipping 88 matching lines...)
           chunk.start = free_space_address;
           chunk.end = free_space_address + size;
         } else {
           perform_gc = true;
           break;
         }
       }
     }
     if (perform_gc) {
       if (space == NEW_SPACE) {
-        CollectGarbage(NEW_SPACE, "failed to reserve space in the new space");
+        CollectGarbage(NEW_SPACE, GarbageCollectionReason::kDeserializer);
       } else {
         if (counter > 1) {
           CollectAllGarbage(
               kReduceMemoryFootprintMask | kAbortIncrementalMarkingMask,
-              "failed to reserve space in paged or large "
-              "object space, trying to reduce memory footprint");
+              GarbageCollectionReason::kDeserializer);
         } else {
-          CollectAllGarbage(
-              kAbortIncrementalMarkingMask,
-              "failed to reserve space in paged or large object space");
+          CollectAllGarbage(kAbortIncrementalMarkingMask,
+                            GarbageCollectionReason::kDeserializer);
         }
       }
       gc_performed = true;
       break;  // Abort for-loop over spaces and retry.
     }
   }
 }

 return !gc_performed;
 }
(...skipping 2833 matching lines...)
 bool Heap::IsHeapIterable() {
   // TODO(hpayer): This function is not correct. Allocation folding in old
   // space breaks the iterability.
   return new_space_top_after_last_gc_ == new_space()->top();
 }


 void Heap::MakeHeapIterable() {
   DCHECK(AllowHeapAllocation::IsAllowed());
   if (!IsHeapIterable()) {
-    CollectAllGarbage(kMakeHeapIterableMask, "Heap::MakeHeapIterable");
+    CollectAllGarbage(kMakeHeapIterableMask,
+                      GarbageCollectionReason::kMakeHeapIterable);
   }
   if (mark_compact_collector()->sweeping_in_progress()) {
     mark_compact_collector()->EnsureSweepingCompleted();
   }
   DCHECK(IsHeapIterable());
 }


 static double ComputeMutatorUtilization(double mutator_speed, double gc_speed) {
   const double kMinMutatorUtilization = 0.0;
(...skipping 114 matching lines...)

 bool Heap::MarkingDequesAreEmpty() {
   return mark_compact_collector()->marking_deque()->IsEmpty() &&
          (!UsingEmbedderHeapTracer() ||
           (mark_compact_collector()->wrappers_to_trace() == 0 &&
            mark_compact_collector()
                ->embedder_heap_tracer()
                ->NumberOfWrappersToTrace() == 0));
 }

-void Heap::FinalizeIncrementalMarkingIfComplete(const char* comment) {
+void Heap::FinalizeIncrementalMarkingIfComplete(
+    GarbageCollectionReason gc_reason) {
   if (incremental_marking()->IsMarking() &&
       (incremental_marking()->IsReadyToOverApproximateWeakClosure() ||
        (!incremental_marking()->finalize_marking_completed() &&
         MarkingDequesAreEmpty()))) {
-    FinalizeIncrementalMarking(comment);
+    FinalizeIncrementalMarking(gc_reason);
   } else if (incremental_marking()->IsComplete() ||
              (mark_compact_collector()->marking_deque()->IsEmpty())) {
-    CollectAllGarbage(current_gc_flags_, comment);
+    CollectAllGarbage(current_gc_flags_, gc_reason);
   }
 }

-
-bool Heap::TryFinalizeIdleIncrementalMarking(double idle_time_in_ms) {
+bool Heap::TryFinalizeIdleIncrementalMarking(
+    double idle_time_in_ms, GarbageCollectionReason gc_reason) {
   size_t size_of_objects = static_cast<size_t>(SizeOfObjects());
   double final_incremental_mark_compact_speed_in_bytes_per_ms =
       tracer()->FinalIncrementalMarkCompactSpeedInBytesPerMillisecond();
   if (incremental_marking()->IsReadyToOverApproximateWeakClosure() ||
       (!incremental_marking()->finalize_marking_completed() &&
        MarkingDequesAreEmpty() &&
        gc_idle_time_handler_->ShouldDoOverApproximateWeakClosure(
            idle_time_in_ms))) {
-    FinalizeIncrementalMarking(
-        "Idle notification: finalize incremental marking");
+    FinalizeIncrementalMarking(gc_reason);
     return true;
   } else if (incremental_marking()->IsComplete() ||
              (MarkingDequesAreEmpty() &&
               gc_idle_time_handler_->ShouldDoFinalIncrementalMarkCompact(
                   idle_time_in_ms, size_of_objects,
                   final_incremental_mark_compact_speed_in_bytes_per_ms))) {
-    CollectAllGarbage(current_gc_flags_,
-                      "idle notification: finalize incremental marking");
+    CollectAllGarbage(current_gc_flags_, gc_reason);
     return true;
   }
   return false;
 }

 void Heap::RegisterReservationsForBlackAllocation(Reservation* reservations) {
   // TODO(hpayer): We do not have to iterate reservations on black objects
   // for marking. We just have to execute the special visiting side effect
   // code that adds objects to global data structures, e.g. for array buffers.

(...skipping 54 matching lines...)
             ->NotifyIdleTaskProgress();
         result = IncrementalMarkingJob::IdleTask::Step(this, deadline_in_ms) ==
                  IncrementalMarkingJob::IdleTask::kDone;
       }
       break;
     }
     case DO_FULL_GC: {
       DCHECK(contexts_disposed_ > 0);
       HistogramTimerScope scope(isolate_->counters()->gc_context());
       TRACE_EVENT0("v8", "V8.GCContext");
-      CollectAllGarbage(kNoGCFlags, "idle notification: contexts disposed");
+      CollectAllGarbage(kNoGCFlags, GarbageCollectionReason::kContextDisposal);
       break;
     }
     case DO_NOTHING:
       break;
   }

   return result;
 }

(...skipping 112 matching lines...)

 void Heap::CheckMemoryPressure() {
   if (HighMemoryPressure()) {
     if (isolate()->concurrent_recompilation_enabled()) {
       // The optimizing compiler may be unnecessarily holding on to memory.
       DisallowHeapAllocation no_recursive_gc;
       isolate()->optimizing_compile_dispatcher()->Flush();
     }
   }
   if (memory_pressure_level_.Value() == MemoryPressureLevel::kCritical) {
-    CollectGarbageOnMemoryPressure("memory pressure");
+    CollectGarbageOnMemoryPressure();
   } else if (memory_pressure_level_.Value() == MemoryPressureLevel::kModerate) {
     if (FLAG_incremental_marking && incremental_marking()->IsStopped()) {
-      StartIdleIncrementalMarking();
+      StartIncrementalMarking(kReduceMemoryFootprintMask,
+                              GarbageCollectionReason::kMemoryPressure);
     }
   }
   MemoryReducer::Event event;
   event.type = MemoryReducer::kPossibleGarbage;
   event.time_ms = MonotonicallyIncreasingTimeInMs();
   memory_reducer_->NotifyPossibleGarbage(event);
 }

-void Heap::CollectGarbageOnMemoryPressure(const char* source) {
+void Heap::CollectGarbageOnMemoryPressure() {
   const int kGarbageThresholdInBytes = 8 * MB;
   const double kGarbageThresholdAsFractionOfTotalMemory = 0.1;
   // This constant is the maximum response time in the RAIL performance model.
   const double kMaxMemoryPressurePauseMs = 100;

   double start = MonotonicallyIncreasingTimeInMs();
   CollectAllGarbage(kReduceMemoryFootprintMask | kAbortIncrementalMarkingMask,
-                    source, kGCCallbackFlagCollectAllAvailableGarbage);
+                    GarbageCollectionReason::kMemoryPressure,
+                    kGCCallbackFlagCollectAllAvailableGarbage);
   double end = MonotonicallyIncreasingTimeInMs();

   // Estimate how much memory we can free.
   int64_t potential_garbage =
       (CommittedMemory() - SizeOfObjects()) + external_memory_;
   // If we can potentially free a large amount of memory, then start GC right
   // away instead of waiting for the memory reducer.
   if (potential_garbage >= kGarbageThresholdInBytes &&
       potential_garbage >=
           CommittedMemory() * kGarbageThresholdAsFractionOfTotalMemory) {
     // If we spent less than half of the time budget, then perform a full GC.
     // Otherwise, start incremental marking.
     if (end - start < kMaxMemoryPressurePauseMs / 2) {
       CollectAllGarbage(
-          kReduceMemoryFootprintMask | kAbortIncrementalMarkingMask, source,
+          kReduceMemoryFootprintMask | kAbortIncrementalMarkingMask,
+          GarbageCollectionReason::kMemoryPressure,
           kGCCallbackFlagCollectAllAvailableGarbage);
     } else {
       if (FLAG_incremental_marking && incremental_marking()->IsStopped()) {
-        StartIdleIncrementalMarking();
+        StartIncrementalMarking(kReduceMemoryFootprintMask,
+                                GarbageCollectionReason::kMemoryPressure);
       }
     }
   }
 }
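CollectGarbageOnMemoryPressure() above budgets the whole critical-pressure response against the 100 ms RAIL limit: a second full GC is attempted only when the first one used less than half the budget, otherwise the cheaper incremental path is taken. A minimal sketch of that decision, with an assumed measurement for the first GC:

#include <cstdio>

int main() {
  const double kMaxMemoryPressurePauseMs = 100;
  const double first_gc_ms = 38;  // illustrative elapsed time
  if (first_gc_ms < kMaxMemoryPressurePauseMs / 2) {
    std::printf("budget remains: run a second full GC\n");
  } else {
    std::printf("budget spent: fall back to incremental marking\n");
  }
  return 0;
}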

 void Heap::MemoryPressureNotification(MemoryPressureLevel level,
                                       bool is_isolate_locked) {
   MemoryPressureLevel previous = memory_pressure_level_.Value();
   memory_pressure_level_.SetValue(level);
   if ((previous != MemoryPressureLevel::kCritical &&
(...skipping 65 matching lines...)
   code_space_->ReportStatistics();
   PrintF("Map space : ");
   map_space_->ReportStatistics();
   PrintF("Large object space : ");
   lo_space_->ReportStatistics();
   PrintF(">>>>>> ========================================= >>>>>>\n");
 }

 #endif  // DEBUG

+const char* Heap::GarbageCollectionReasonToString(
+    GarbageCollectionReason gc_reason) {
+  switch (gc_reason) {
+    case GarbageCollectionReason::kAllocationFailure:
+      return "allocation failure";
+    case GarbageCollectionReason::kAllocationLimit:
+      return "allocation limit";
+    case GarbageCollectionReason::kContextDisposal:
+      return "context disposal";
+    case GarbageCollectionReason::kCountersExtension:
+      return "counters extension";
+    case GarbageCollectionReason::kDebugger:
+      return "debugger";
+    case GarbageCollectionReason::kDeserializer:
+      return "deserialize";
+    case GarbageCollectionReason::kExternalMemoryPressure:
+      return "external memory pressure";
+    case GarbageCollectionReason::kFinalizeMarkingViaStackGuard:
+      return "finalize incremental marking via stack guard";
+    case GarbageCollectionReason::kFinalizeMarkingViaTask:
+      return "finalize incremental marking via task";
+    case GarbageCollectionReason::kFullHashtable:
+      return "full hash-table";
+    case GarbageCollectionReason::kHeapProfiler:
+      return "heap profiler";
+    case GarbageCollectionReason::kIdleTask:
+      return "idle task";
+    case GarbageCollectionReason::kLastResort:
+      return "last resort";
+    case GarbageCollectionReason::kLowMemoryNotification:
+      return "low memory notification";
+    case GarbageCollectionReason::kMakeHeapIterable:
+      return "make heap iterable";
+    case GarbageCollectionReason::kMemoryPressure:
+      return "memory pressure";
+    case GarbageCollectionReason::kMemoryReducer:
+      return "memory reducer";
+    case GarbageCollectionReason::kRuntime:
+      return "runtime";
+    case GarbageCollectionReason::kSamplingProfiler:
+      return "sampling profiler";
+    case GarbageCollectionReason::kSnapshotCreator:
+      return "snapshot creator";
+    case GarbageCollectionReason::kTesting:
+      return "testing";
+    case GarbageCollectionReason::kUnknown:
+      return "unknown";
+  }
+  UNREACHABLE();
+  return "";
+}
+
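The new switch above deliberately has no default case: if a future enumerator is added without a matching string, compilers emit a -Wswitch warning instead of silently falling through, and the UNREACHABLE() plus dummy return only satisfy compilers that cannot prove exhaustiveness. A self-contained illustration of the same pattern, with a shortened hypothetical enum:

#include <cstdio>

// Exhaustive switch with no default case, mirroring the pattern above.
enum class Reason { kTesting, kUnknown };

static const char* ReasonToString(Reason reason) {
  switch (reason) {
    case Reason::kTesting:
      return "testing";
    case Reason::kUnknown:
      return "unknown";
  }
  return "";  // unreachable while the switch covers every enumerator
}

int main() {
  std::printf("%s\n", ReasonToString(Reason::kTesting));
  return 0;
}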
 bool Heap::Contains(HeapObject* value) {
   if (memory_allocator()->IsOutsideAllocatedSpace(value->address())) {
     return false;
   }
   return HasBeenSetUp() &&
          (new_space_->ToSpaceContains(value) || old_space_->Contains(value) ||
           code_space_->Contains(value) || map_space_->Contains(value) ||
           lo_space_->Contains(value));
 }

(...skipping 1887 matching lines...)
 }


 // static
 int Heap::GetStaticVisitorIdForMap(Map* map) {
   return StaticVisitorBase::GetVisitorId(map);
 }

 }  // namespace internal
 }  // namespace v8
