OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/heap/heap.h" | 5 #include "src/heap/heap.h" |
6 | 6 |
7 #include "src/accessors.h" | 7 #include "src/accessors.h" |
8 #include "src/api.h" | 8 #include "src/api.h" |
9 #include "src/base/bits.h" | 9 #include "src/base/bits.h" |
10 #include "src/base/once.h" | 10 #include "src/base/once.h" |
(...skipping 31 matching lines...) |
42 namespace internal { | 42 namespace internal { |
43 | 43 |
44 | 44 |
45 struct Heap::StrongRootsList { | 45 struct Heap::StrongRootsList { |
46 Object** start; | 46 Object** start; |
47 Object** end; | 47 Object** end; |
48 StrongRootsList* next; | 48 StrongRootsList* next; |
49 }; | 49 }; |
50 | 50 |
51 | 51 |
| 52 DEFINE_OPERATORS_FOR_FLAGS(Heap::GCFlags) |
| 53 |
| 54 |
52 Heap::Heap() | 55 Heap::Heap() |
53 : amount_of_external_allocated_memory_(0), | 56 : amount_of_external_allocated_memory_(0), |
54 amount_of_external_allocated_memory_at_last_global_gc_(0), | 57 amount_of_external_allocated_memory_at_last_global_gc_(0), |
55 isolate_(NULL), | 58 isolate_(NULL), |
56 code_range_size_(0), | 59 code_range_size_(0), |
57 // semispace_size_ should be a power of 2 and old_generation_size_ should | 60 // semispace_size_ should be a power of 2 and old_generation_size_ should |
58 // be a multiple of Page::kPageSize. | 61 // be a multiple of Page::kPageSize. |
59 reserved_semispace_size_(8 * (kPointerSize / 4) * MB), | 62 reserved_semispace_size_(8 * (kPointerSize / 4) * MB), |
60 max_semi_space_size_(8 * (kPointerSize / 4) * MB), | 63 max_semi_space_size_(8 * (kPointerSize / 4) * MB), |
61 initial_semispace_size_(Page::kPageSize), | 64 initial_semispace_size_(Page::kPageSize), |
(...skipping 63 matching lines...) |
125 crankshaft_codegen_bytes_generated_(0), | 128 crankshaft_codegen_bytes_generated_(0), |
126 new_space_allocation_counter_(0), | 129 new_space_allocation_counter_(0), |
127 old_generation_allocation_counter_(0), | 130 old_generation_allocation_counter_(0), |
128 old_generation_size_at_last_gc_(0), | 131 old_generation_size_at_last_gc_(0), |
129 gcs_since_last_deopt_(0), | 132 gcs_since_last_deopt_(0), |
130 allocation_sites_scratchpad_length_(0), | 133 allocation_sites_scratchpad_length_(0), |
131 ring_buffer_full_(false), | 134 ring_buffer_full_(false), |
132 ring_buffer_end_(0), | 135 ring_buffer_end_(0), |
133 promotion_queue_(this), | 136 promotion_queue_(this), |
134 configured_(false), | 137 configured_(false), |
135 current_gc_flags_(Heap::kNoGCFlags), | 138 current_gc_flags_(kNoGCFlags), |
136 current_gc_callback_flags_(GCCallbackFlags::kNoGCCallbackFlags), | 139 current_gc_callback_flags_(GCCallbackFlags::kNoGCCallbackFlags), |
137 external_string_table_(this), | 140 external_string_table_(this), |
138 chunks_queued_for_free_(NULL), | 141 chunks_queued_for_free_(NULL), |
139 pending_unmap_job_semaphore_(0), | 142 pending_unmap_job_semaphore_(0), |
140 gc_callbacks_depth_(0), | 143 gc_callbacks_depth_(0), |
141 deserialization_complete_(false), | 144 deserialization_complete_(false), |
142 concurrent_sweeping_enabled_(false), | 145 concurrent_sweeping_enabled_(false), |
143 strong_roots_list_(NULL) { | 146 strong_roots_list_(NULL) { |
144 // Allow build-time customization of the max semispace size. Building | 147 // Allow build-time customization of the max semispace size. Building |
145 // V8 with snapshots and a non-default max semispace size is much | 148 // V8 with snapshots and a non-default max semispace size is much |
(...skipping 589 matching lines...) |
735 } | 738 } |
736 // We must not compact the weak fixed list here, as we may be in the middle | 739 // We must not compact the weak fixed list here, as we may be in the middle |
737 // of writing to it, when the GC triggered. Instead, we reset the root value. | 740 // of writing to it, when the GC triggered. Instead, we reset the root value. |
738 set_weak_stack_trace_list(Smi::FromInt(0)); | 741 set_weak_stack_trace_list(Smi::FromInt(0)); |
739 } | 742 } |
740 | 743 |
741 | 744 |
742 void Heap::HandleGCRequest() { | 745 void Heap::HandleGCRequest() { |
743 if (incremental_marking()->request_type() == | 746 if (incremental_marking()->request_type() == |
744 IncrementalMarking::COMPLETE_MARKING) { | 747 IncrementalMarking::COMPLETE_MARKING) { |
745 CollectAllGarbage(current_gc_flags_, "GC interrupt", | 748 CollectAllGarbage("GC interrupt", current_gc_flags_, |
746 current_gc_callback_flags_); | 749 current_gc_callback_flags_); |
747 return; | 750 return; |
748 } | 751 } |
749 DCHECK(FLAG_overapproximate_weak_closure); | 752 DCHECK(FLAG_overapproximate_weak_closure); |
750 if (!incremental_marking()->weak_closure_was_overapproximated()) { | 753 if (!incremental_marking()->weak_closure_was_overapproximated()) { |
751 OverApproximateWeakClosure("GC interrupt"); | 754 OverApproximateWeakClosure("GC interrupt"); |
752 } | 755 } |
753 } | 756 } |
754 | 757 |
755 | 758 |
(...skipping 23 matching lines...) |
779 AllowHeapAllocation allow_allocation; | 782 AllowHeapAllocation allow_allocation; |
780 GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL); | 783 GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL); |
781 VMState<EXTERNAL> state(isolate_); | 784 VMState<EXTERNAL> state(isolate_); |
782 HandleScope handle_scope(isolate_); | 785 HandleScope handle_scope(isolate_); |
783 CallGCEpilogueCallbacks(kGCTypeIncrementalMarking, kNoGCCallbackFlags); | 786 CallGCEpilogueCallbacks(kGCTypeIncrementalMarking, kNoGCCallbackFlags); |
784 } | 787 } |
785 } | 788 } |
786 } | 789 } |
787 | 790 |
788 | 791 |
789 void Heap::CollectAllGarbage(int flags, const char* gc_reason, | 792 void Heap::CollectAllGarbage(const char* gc_reason, const GCFlags flags, |
790 const v8::GCCallbackFlags gc_callback_flags) { | 793 const v8::GCCallbackFlags gc_callback_flags) { |
791 // Since we are ignoring the return value, the exact choice of space does | 794 // Since we are ignoring the return value, the exact choice of space does |
792 // not matter, so long as we do not specify NEW_SPACE, which would not | 795 // not matter, so long as we do not specify NEW_SPACE, which would not |
793 // cause a full GC. | 796 // cause a full GC. |
794 set_current_gc_flags(flags); | 797 CollectGarbage(OLD_SPACE, gc_reason, flags, gc_callback_flags); |
795 CollectGarbage(OLD_SPACE, gc_reason, gc_callback_flags); | |
796 set_current_gc_flags(kNoGCFlags); | |
797 } | 798 } |
798 | 799 |
799 | 800 |
800 void Heap::CollectAllAvailableGarbage(const char* gc_reason) { | 801 void Heap::CollectAllAvailableGarbage(const char* gc_reason) { |
801 // Since we are ignoring the return value, the exact choice of space does | 802 // Since we are ignoring the return value, the exact choice of space does |
802 // not matter, so long as we do not specify NEW_SPACE, which would not | 803 // not matter, so long as we do not specify NEW_SPACE, which would not |
803 // cause a full GC. | 804 // cause a full GC. |
804 // Major GC would invoke weak handle callbacks on weakly reachable | 805 // Major GC would invoke weak handle callbacks on weakly reachable |
805 // handles, but won't collect weakly reachable objects until next | 806 // handles, but won't collect weakly reachable objects until next |
806 // major GC. Therefore if we collect aggressively and weak handle callback | 807 // major GC. Therefore if we collect aggressively and weak handle callback |
807 // has been invoked, we rerun major GC to release objects which become | 808 // has been invoked, we rerun major GC to release objects which become |
808 // garbage. | 809 // garbage. |
809 // Note: as weak callbacks can execute arbitrary code, we cannot | 810 // Note: as weak callbacks can execute arbitrary code, we cannot |
810 // hope that eventually there will be no weak callbacks invocations. | 811 // hope that eventually there will be no weak callbacks invocations. |
811 // Therefore stop recollecting after several attempts. | 812 // Therefore stop recollecting after several attempts. |
812 if (isolate()->concurrent_recompilation_enabled()) { | 813 if (isolate()->concurrent_recompilation_enabled()) { |
813 // The optimizing compiler may be unnecessarily holding on to memory. | 814 // The optimizing compiler may be unnecessarily holding on to memory. |
814 DisallowHeapAllocation no_recursive_gc; | 815 DisallowHeapAllocation no_recursive_gc; |
815 isolate()->optimizing_compile_dispatcher()->Flush(); | 816 isolate()->optimizing_compile_dispatcher()->Flush(); |
816 } | 817 } |
817 isolate()->ClearSerializerData(); | 818 isolate()->ClearSerializerData(); |
818 set_current_gc_flags(kMakeHeapIterableMask | kReduceMemoryFootprintMask); | 819 isolate()->compilation_cache()->Clear(); |
819 isolate_->compilation_cache()->Clear(); | |
820 const int kMaxNumberOfAttempts = 7; | 820 const int kMaxNumberOfAttempts = 7; |
821 const int kMinNumberOfAttempts = 2; | 821 const int kMinNumberOfAttempts = 2; |
822 for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) { | 822 for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) { |
823 if (!CollectGarbage(MARK_COMPACTOR, gc_reason, NULL, | 823 if (!CollectGarbage(OLD_SPACE, gc_reason, |
824 v8::kGCCallbackFlagForced) && | 824 Heap::kAbortIncrementalMarkingMask | |
825 attempt + 1 >= kMinNumberOfAttempts) { | 825 Heap::kReduceMemoryFootprintMask, |
| 826 kGCCallbackFlagForced) && |
| 827 ((attempt + 1) >= kMinNumberOfAttempts)) { |
826 break; | 828 break; |
827 } | 829 } |
828 } | 830 } |
829 set_current_gc_flags(kNoGCFlags); | |
830 new_space_.Shrink(); | 831 new_space_.Shrink(); |
831 UncommitFromSpace(); | 832 UncommitFromSpace(); |
832 } | 833 } |
833 | 834 |
834 | 835 |
835 void Heap::EnsureFillerObjectAtTop() { | 836 void Heap::EnsureFillerObjectAtTop() { |
836 // There may be an allocation memento behind every object in new space. | 837 // There may be an allocation memento behind every object in new space. |
837 // If we evacuate a not full new space or if we are on the last page of | 838 // If we evacuate a not full new space or if we are on the last page of |
838 // the new space, then there may be uninitialized memory behind the top | 839 // the new space, then there may be uninitialized memory behind the top |
839 // pointer of the new space page. We store a filler object there to | 840 // pointer of the new space page. We store a filler object there to |
840 // identify the unused space. | 841 // identify the unused space. |
841 Address from_top = new_space_.top(); | 842 Address from_top = new_space_.top(); |
842 // Check that from_top is inside its page (i.e., not at the end). | 843 // Check that from_top is inside its page (i.e., not at the end). |
843 Address space_end = new_space_.ToSpaceEnd(); | 844 Address space_end = new_space_.ToSpaceEnd(); |
844 if (from_top < space_end) { | 845 if (from_top < space_end) { |
845 Page* page = Page::FromAddress(from_top); | 846 Page* page = Page::FromAddress(from_top); |
846 if (page->Contains(from_top)) { | 847 if (page->Contains(from_top)) { |
847 int remaining_in_page = static_cast<int>(page->area_end() - from_top); | 848 int remaining_in_page = static_cast<int>(page->area_end() - from_top); |
848 CreateFillerObjectAt(from_top, remaining_in_page); | 849 CreateFillerObjectAt(from_top, remaining_in_page); |
849 } | 850 } |
850 } | 851 } |
851 } | 852 } |
852 | 853 |
853 | 854 |
854 bool Heap::CollectGarbage(GarbageCollector collector, const char* gc_reason, | 855 bool Heap::CollectGarbage(GarbageCollector collector, const char* gc_reason, |
855 const char* collector_reason, | 856 const char* collector_reason) { |
856 const v8::GCCallbackFlags gc_callback_flags) { | |
857 // The VM is in the GC state until exiting this function. | 857 // The VM is in the GC state until exiting this function. |
858 VMState<GC> state(isolate_); | 858 VMState<GC> state(isolate_); |
859 | 859 |
860 #ifdef DEBUG | 860 #ifdef DEBUG |
861 // Reset the allocation timeout to the GC interval, but make sure to | 861 // Reset the allocation timeout to the GC interval, but make sure to |
862 // allow at least a few allocations after a collection. The reason | 862 // allow at least a few allocations after a collection. The reason |
863 // for this is that we have a lot of allocation sequences and we | 863 // for this is that we have a lot of allocation sequences and we |
864 // assume that a garbage collection will allow the subsequent | 864 // assume that a garbage collection will allow the subsequent |
865 // allocation attempts to go through. | 865 // allocation attempts to go through. |
866 allocation_timeout_ = Max(6, FLAG_gc_interval); | 866 allocation_timeout_ = Max(6, FLAG_gc_interval); |
(...skipping 34 matching lines...) |
901 { | 901 { |
902 tracer()->Start(collector, gc_reason, collector_reason); | 902 tracer()->Start(collector, gc_reason, collector_reason); |
903 DCHECK(AllowHeapAllocation::IsAllowed()); | 903 DCHECK(AllowHeapAllocation::IsAllowed()); |
904 DisallowHeapAllocation no_allocation_during_gc; | 904 DisallowHeapAllocation no_allocation_during_gc; |
905 GarbageCollectionPrologue(); | 905 GarbageCollectionPrologue(); |
906 | 906 |
907 { | 907 { |
908 HistogramTimerScope histogram_timer_scope( | 908 HistogramTimerScope histogram_timer_scope( |
909 (collector == SCAVENGER) ? isolate_->counters()->gc_scavenger() | 909 (collector == SCAVENGER) ? isolate_->counters()->gc_scavenger() |
910 : isolate_->counters()->gc_compactor()); | 910 : isolate_->counters()->gc_compactor()); |
911 next_gc_likely_to_collect_more = | 911 next_gc_likely_to_collect_more = PerformGarbageCollection(collector); |
912 PerformGarbageCollection(collector, gc_callback_flags); | |
913 } | 912 } |
914 | 913 |
915 GarbageCollectionEpilogue(); | 914 GarbageCollectionEpilogue(); |
916 if (collector == MARK_COMPACTOR && FLAG_track_detached_contexts) { | 915 if (collector == MARK_COMPACTOR && FLAG_track_detached_contexts) { |
917 isolate()->CheckDetachedContextsAfterGC(); | 916 isolate()->CheckDetachedContextsAfterGC(); |
918 } | 917 } |
919 | 918 |
920 if (collector == MARK_COMPACTOR) { | 919 if (collector == MARK_COMPACTOR) { |
921 intptr_t committed_memory_after = CommittedOldGenerationMemory(); | 920 intptr_t committed_memory_after = CommittedOldGenerationMemory(); |
922 intptr_t used_memory_after = PromotedSpaceSizeOfObjects(); | 921 intptr_t used_memory_after = PromotedSpaceSizeOfObjects(); |
(...skipping 10 matching lines...) |
933 (detached_contexts()->length() > 0); | 932 (detached_contexts()->length() > 0); |
934 if (deserialization_complete_) { | 933 if (deserialization_complete_) { |
935 memory_reducer_->NotifyMarkCompact(event); | 934 memory_reducer_->NotifyMarkCompact(event); |
936 } | 935 } |
937 } | 936 } |
938 | 937 |
939 tracer()->Stop(collector); | 938 tracer()->Stop(collector); |
940 } | 939 } |
941 | 940 |
942 if (collector == MARK_COMPACTOR && | 941 if (collector == MARK_COMPACTOR && |
943 (gc_callback_flags & kGCCallbackFlagForced) != 0) { | 942 (current_gc_callback_flags_ & kGCCallbackFlagForced) != 0) { |
944 isolate()->CountUsage(v8::Isolate::kForcedGC); | 943 isolate()->CountUsage(v8::Isolate::kForcedGC); |
945 } | 944 } |
946 | 945 |
947 // Start incremental marking for the next cycle. The heap snapshot | 946 // Start incremental marking for the next cycle. The heap snapshot |
948 // generator needs incremental marking to stay off after it aborted. | 947 // generator needs incremental marking to stay off after it aborted. |
949 if (!ShouldAbortIncrementalMarking() && incremental_marking()->IsStopped() && | 948 if (!ShouldAbortIncrementalMarking() && incremental_marking()->IsStopped() && |
950 incremental_marking()->ShouldActivateEvenWithoutIdleNotification()) { | 949 incremental_marking()->ShouldActivateEvenWithoutIdleNotification()) { |
951 StartIncrementalMarking(kNoGCFlags, kNoGCCallbackFlags, "GC epilogue"); | 950 StartIncrementalMarking(kNoGCFlags, kNoGCCallbackFlags, "GC epilogue"); |
952 } | 951 } |
953 | 952 |
(...skipping 14 matching lines...) |
968 set_retained_maps(ArrayList::cast(empty_fixed_array())); | 967 set_retained_maps(ArrayList::cast(empty_fixed_array())); |
969 tracer()->AddContextDisposalTime(base::OS::TimeCurrentMillis()); | 968 tracer()->AddContextDisposalTime(base::OS::TimeCurrentMillis()); |
970 MemoryReducer::Event event; | 969 MemoryReducer::Event event; |
971 event.type = MemoryReducer::kContextDisposed; | 970 event.type = MemoryReducer::kContextDisposed; |
972 event.time_ms = MonotonicallyIncreasingTimeInMs(); | 971 event.time_ms = MonotonicallyIncreasingTimeInMs(); |
973 memory_reducer_->NotifyContextDisposed(event); | 972 memory_reducer_->NotifyContextDisposed(event); |
974 return ++contexts_disposed_; | 973 return ++contexts_disposed_; |
975 } | 974 } |
976 | 975 |
977 | 976 |
978 void Heap::StartIncrementalMarking(int gc_flags, | 977 void Heap::StartIncrementalMarking(const GCFlags gc_flags, |
979 const GCCallbackFlags gc_callback_flags, | 978 const GCCallbackFlags gc_callback_flags, |
980 const char* reason) { | 979 const char* reason) { |
981 DCHECK(incremental_marking()->IsStopped()); | 980 DCHECK(incremental_marking()->IsStopped()); |
982 set_current_gc_flags(gc_flags); | 981 set_current_gc_flags(gc_flags); |
983 current_gc_callback_flags_ = gc_callback_flags; | 982 current_gc_callback_flags_ = gc_callback_flags; |
984 incremental_marking()->Start(reason); | 983 incremental_marking()->Start(reason); |
985 } | 984 } |
986 | 985 |
987 | 986 |
988 void Heap::StartIdleIncrementalMarking() { | 987 void Heap::StartIdleIncrementalMarking() { |
(...skipping 81 matching lines...) |
1070 chunk.start = free_space_address; | 1069 chunk.start = free_space_address; |
1071 chunk.end = free_space_address + size; | 1070 chunk.end = free_space_address + size; |
1072 } else { | 1071 } else { |
1073 perform_gc = true; | 1072 perform_gc = true; |
1074 break; | 1073 break; |
1075 } | 1074 } |
1076 } | 1075 } |
1077 } | 1076 } |
1078 if (perform_gc) { | 1077 if (perform_gc) { |
1079 if (space == NEW_SPACE) { | 1078 if (space == NEW_SPACE) { |
1080 CollectGarbage(NEW_SPACE, "failed to reserve space in the new space"); | 1079 CollectGarbageNewSpace("failed to reserve space in the new space"); |
1081 } else { | 1080 } else { |
1082 if (counter > 1) { | 1081 if (counter > 1) { |
1083 CollectAllGarbage( | 1082 CollectAllGarbage( |
1084 kReduceMemoryFootprintMask | kAbortIncrementalMarkingMask, | |
1085 "failed to reserve space in paged or large " | 1083 "failed to reserve space in paged or large " |
1086 "object space, trying to reduce memory footprint"); | 1084 "object space, trying to reduce memory footprint", |
| 1085 kReduceMemoryFootprintMask | kAbortIncrementalMarkingMask); |
1087 } else { | 1086 } else { |
1088 CollectAllGarbage( | 1087 CollectAllGarbage( |
1089 kAbortIncrementalMarkingMask, | 1088 "failed to reserve space in paged or large object space", |
1090 "failed to reserve space in paged or large object space"); | 1089 kAbortIncrementalMarkingMask); |
1091 } | 1090 } |
1092 } | 1091 } |
1093 gc_performed = true; | 1092 gc_performed = true; |
1094 break; // Abort for-loop over spaces and retry. | 1093 break; // Abort for-loop over spaces and retry. |
1095 } | 1094 } |
1096 } | 1095 } |
1097 } | 1096 } |
1098 | 1097 |
1099 return !gc_performed; | 1098 return !gc_performed; |
1100 } | 1099 } |
(...skipping 48 matching lines...) |
1149 | 1148 |
1150 double survival_rate = promotion_ratio_ + semi_space_copied_rate_; | 1149 double survival_rate = promotion_ratio_ + semi_space_copied_rate_; |
1151 tracer()->AddSurvivalRatio(survival_rate); | 1150 tracer()->AddSurvivalRatio(survival_rate); |
1152 if (survival_rate > kYoungSurvivalRateHighThreshold) { | 1151 if (survival_rate > kYoungSurvivalRateHighThreshold) { |
1153 high_survival_rate_period_length_++; | 1152 high_survival_rate_period_length_++; |
1154 } else { | 1153 } else { |
1155 high_survival_rate_period_length_ = 0; | 1154 high_survival_rate_period_length_ = 0; |
1156 } | 1155 } |
1157 } | 1156 } |
1158 | 1157 |
1159 bool Heap::PerformGarbageCollection( | 1158 |
1160 GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags) { | 1159 bool Heap::PerformGarbageCollection(GarbageCollector collector) { |
1161 int freed_global_handles = 0; | 1160 int freed_global_handles = 0; |
1162 | 1161 |
1163 if (collector != SCAVENGER) { | 1162 if (collector != SCAVENGER) { |
1164 PROFILE(isolate_, CodeMovingGCEvent()); | 1163 PROFILE(isolate_, CodeMovingGCEvent()); |
1165 } | 1164 } |
1166 | 1165 |
1167 #ifdef VERIFY_HEAP | 1166 #ifdef VERIFY_HEAP |
1168 if (FLAG_verify_heap) { | 1167 if (FLAG_verify_heap) { |
1169 VerifyStringTable(this); | 1168 VerifyStringTable(this); |
1170 } | 1169 } |
(...skipping 56 matching lines...) |
1227 mark_compact_collector_.EnsureMarkingDequeIsCommitted( | 1226 mark_compact_collector_.EnsureMarkingDequeIsCommitted( |
1228 MarkCompactCollector::kMinMarkingDequeSize); | 1227 MarkCompactCollector::kMinMarkingDequeSize); |
1229 } | 1228 } |
1230 | 1229 |
1231 gc_post_processing_depth_++; | 1230 gc_post_processing_depth_++; |
1232 { | 1231 { |
1233 AllowHeapAllocation allow_allocation; | 1232 AllowHeapAllocation allow_allocation; |
1234 GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL); | 1233 GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL); |
1235 freed_global_handles = | 1234 freed_global_handles = |
1236 isolate_->global_handles()->PostGarbageCollectionProcessing( | 1235 isolate_->global_handles()->PostGarbageCollectionProcessing( |
1237 collector, gc_callback_flags); | 1236 collector, current_gc_callback_flags_); |
1238 } | 1237 } |
1239 gc_post_processing_depth_--; | 1238 gc_post_processing_depth_--; |
1240 | 1239 |
1241 isolate_->eternal_handles()->PostGarbageCollectionProcessing(this); | 1240 isolate_->eternal_handles()->PostGarbageCollectionProcessing(this); |
1242 | 1241 |
1243 // Update relocatables. | 1242 // Update relocatables. |
1244 Relocatable::PostGarbageCollectionProcessing(isolate_); | 1243 Relocatable::PostGarbageCollectionProcessing(isolate_); |
1245 | 1244 |
1246 double gc_speed = tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond(); | 1245 double gc_speed = tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond(); |
1247 double mutator_speed = static_cast<double>( | 1246 double mutator_speed = static_cast<double>( |
(...skipping 10 matching lines...) |
1258 DampenOldGenerationAllocationLimit(old_gen_size, gc_speed, mutator_speed); | 1257 DampenOldGenerationAllocationLimit(old_gen_size, gc_speed, mutator_speed); |
1259 } | 1258 } |
1260 | 1259 |
1261 { | 1260 { |
1262 GCCallbacksScope scope(this); | 1261 GCCallbacksScope scope(this); |
1263 if (scope.CheckReenter()) { | 1262 if (scope.CheckReenter()) { |
1264 AllowHeapAllocation allow_allocation; | 1263 AllowHeapAllocation allow_allocation; |
1265 GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL); | 1264 GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL); |
1266 VMState<EXTERNAL> state(isolate_); | 1265 VMState<EXTERNAL> state(isolate_); |
1267 HandleScope handle_scope(isolate_); | 1266 HandleScope handle_scope(isolate_); |
1268 CallGCEpilogueCallbacks(gc_type, gc_callback_flags); | 1267 CallGCEpilogueCallbacks(gc_type, current_gc_callback_flags_); |
1269 } | 1268 } |
1270 } | 1269 } |
1271 | 1270 |
1272 #ifdef VERIFY_HEAP | 1271 #ifdef VERIFY_HEAP |
1273 if (FLAG_verify_heap) { | 1272 if (FLAG_verify_heap) { |
1274 VerifyStringTable(this); | 1273 VerifyStringTable(this); |
1275 } | 1274 } |
1276 #endif | 1275 #endif |
1277 | 1276 |
1278 return freed_global_handles > 0; | 1277 return freed_global_handles > 0; |
(...skipping 3228 matching lines...) |
4507 bool Heap::IsHeapIterable() { | 4506 bool Heap::IsHeapIterable() { |
4508 // TODO(hpayer): This function is not correct. Allocation folding in old | 4507 // TODO(hpayer): This function is not correct. Allocation folding in old |
4509 // space breaks the iterability. | 4508 // space breaks the iterability. |
4510 return new_space_top_after_last_gc_ == new_space()->top(); | 4509 return new_space_top_after_last_gc_ == new_space()->top(); |
4511 } | 4510 } |
4512 | 4511 |
4513 | 4512 |
4514 void Heap::MakeHeapIterable() { | 4513 void Heap::MakeHeapIterable() { |
4515 DCHECK(AllowHeapAllocation::IsAllowed()); | 4514 DCHECK(AllowHeapAllocation::IsAllowed()); |
4516 if (!IsHeapIterable()) { | 4515 if (!IsHeapIterable()) { |
4517 CollectAllGarbage(kMakeHeapIterableMask, "Heap::MakeHeapIterable"); | 4516 CollectAllGarbage("Heap::MakeHeapIterable", kMakeHeapIterableMask); |
4518 } | 4517 } |
4519 if (mark_compact_collector()->sweeping_in_progress()) { | 4518 if (mark_compact_collector()->sweeping_in_progress()) { |
4520 mark_compact_collector()->EnsureSweepingCompleted(); | 4519 mark_compact_collector()->EnsureSweepingCompleted(); |
4521 } | 4520 } |
4522 DCHECK(IsHeapIterable()); | 4521 DCHECK(IsHeapIterable()); |
4523 } | 4522 } |
4524 | 4523 |
4525 | 4524 |
4526 static double ComputeMutatorUtilization(double mutator_speed, double gc_speed) { | 4525 static double ComputeMutatorUtilization(double mutator_speed, double gc_speed) { |
4527 const double kMinMutatorUtilization = 0.0; | 4526 const double kMinMutatorUtilization = 0.0; |
(...skipping 104 matching lines...) |
4632 gc_idle_time_handler_.ShouldDoOverApproximateWeakClosure( | 4631 gc_idle_time_handler_.ShouldDoOverApproximateWeakClosure( |
4633 static_cast<size_t>(idle_time_in_ms))))) { | 4632 static_cast<size_t>(idle_time_in_ms))))) { |
4634 OverApproximateWeakClosure( | 4633 OverApproximateWeakClosure( |
4635 "Idle notification: overapproximate weak closure"); | 4634 "Idle notification: overapproximate weak closure"); |
4636 return true; | 4635 return true; |
4637 } else if (incremental_marking()->IsComplete() || | 4636 } else if (incremental_marking()->IsComplete() || |
4638 (mark_compact_collector_.marking_deque()->IsEmpty() && | 4637 (mark_compact_collector_.marking_deque()->IsEmpty() && |
4639 gc_idle_time_handler_.ShouldDoFinalIncrementalMarkCompact( | 4638 gc_idle_time_handler_.ShouldDoFinalIncrementalMarkCompact( |
4640 static_cast<size_t>(idle_time_in_ms), size_of_objects, | 4639 static_cast<size_t>(idle_time_in_ms), size_of_objects, |
4641 final_incremental_mark_compact_speed_in_bytes_per_ms))) { | 4640 final_incremental_mark_compact_speed_in_bytes_per_ms))) { |
4642 CollectAllGarbage(current_gc_flags_, | 4641 CollectAllGarbage("idle notification: finalize incremental", |
4643 "idle notification: finalize incremental"); | 4642 current_gc_flags_); |
4644 return true; | 4643 return true; |
4645 } | 4644 } |
4646 return false; | 4645 return false; |
4647 } | 4646 } |
4648 | 4647 |
4649 | 4648 |
4650 GCIdleTimeHandler::HeapState Heap::ComputeHeapState() { | 4649 GCIdleTimeHandler::HeapState Heap::ComputeHeapState() { |
4651 GCIdleTimeHandler::HeapState heap_state; | 4650 GCIdleTimeHandler::HeapState heap_state; |
4652 heap_state.contexts_disposed = contexts_disposed_; | 4651 heap_state.contexts_disposed = contexts_disposed_; |
4653 heap_state.contexts_disposal_rate = | 4652 heap_state.contexts_disposal_rate = |
(...skipping 62 matching lines...) |
4716 if (remaining_idle_time_in_ms > 0.0) { | 4715 if (remaining_idle_time_in_ms > 0.0) { |
4717 action.additional_work = TryFinalizeIdleIncrementalMarking( | 4716 action.additional_work = TryFinalizeIdleIncrementalMarking( |
4718 remaining_idle_time_in_ms, heap_state.size_of_objects, | 4717 remaining_idle_time_in_ms, heap_state.size_of_objects, |
4719 heap_state.final_incremental_mark_compact_speed_in_bytes_per_ms); | 4718 heap_state.final_incremental_mark_compact_speed_in_bytes_per_ms); |
4720 } | 4719 } |
4721 break; | 4720 break; |
4722 } | 4721 } |
4723 case DO_FULL_GC: { | 4722 case DO_FULL_GC: { |
4724 DCHECK(contexts_disposed_ > 0); | 4723 DCHECK(contexts_disposed_ > 0); |
4725 HistogramTimerScope scope(isolate_->counters()->gc_context()); | 4724 HistogramTimerScope scope(isolate_->counters()->gc_context()); |
4726 CollectAllGarbage(kNoGCFlags, "idle notification: contexts disposed"); | 4725 CollectAllGarbage("idle notification: contexts disposed", kNoGCFlags); |
4727 break; | 4726 break; |
4728 } | 4727 } |
4729 case DO_SCAVENGE: | 4728 case DO_SCAVENGE: |
4730 CollectGarbage(NEW_SPACE, "idle notification: scavenge"); | 4729 CollectGarbageNewSpace("idle notification: scavenge"); |
4731 break; | 4730 break; |
4732 case DO_FINALIZE_SWEEPING: | 4731 case DO_FINALIZE_SWEEPING: |
4733 mark_compact_collector()->EnsureSweepingCompleted(); | 4732 mark_compact_collector()->EnsureSweepingCompleted(); |
4734 break; | 4733 break; |
4735 case DO_NOTHING: | 4734 case DO_NOTHING: |
4736 break; | 4735 break; |
4737 } | 4736 } |
4738 | 4737 |
4739 return result; | 4738 return result; |
4740 } | 4739 } |
(...skipping 2034 matching lines...) |
6775 *object_type = "CODE_TYPE"; \ | 6774 *object_type = "CODE_TYPE"; \ |
6776 *object_sub_type = "CODE_AGE/" #name; \ | 6775 *object_sub_type = "CODE_AGE/" #name; \ |
6777 return true; | 6776 return true; |
6778 CODE_AGE_LIST_COMPLETE(COMPARE_AND_RETURN_NAME) | 6777 CODE_AGE_LIST_COMPLETE(COMPARE_AND_RETURN_NAME) |
6779 #undef COMPARE_AND_RETURN_NAME | 6778 #undef COMPARE_AND_RETURN_NAME |
6780 } | 6779 } |
6781 return false; | 6780 return false; |
6782 } | 6781 } |
6783 } // namespace internal | 6782 } // namespace internal |
6784 } // namespace v8 | 6783 } // namespace v8 |
OLD | NEW |
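
Note on the API change in the hunks above: the patch moves the human-readable reason to the first parameter of Heap::CollectAllGarbage, turns the integer flags into a typed Heap::GCFlags value (with bitwise operators supplied by DEFINE_OPERATORS_FOR_FLAGS), and stashes the callback flags in current_gc_callback_flags_ so PerformGarbageCollection no longer takes them. The standalone sketch below only illustrates the typed-flags idea and the new call shape; it is a simplification, not V8's actual base::Flags machinery (which is defined outside this diff), and CollectAllGarbage here is a mock, with call shapes copied from call sites in the patch.

    #include <cstdio>

    // Simplified stand-in for the kind of type-safe flag set that
    // DEFINE_OPERATORS_FOR_FLAGS(Heap::GCFlags) enables (assumption: the real
    // implementation in V8's base library is more general than this sketch).
    enum GCFlag {
      kNoGCFlags = 0,
      kReduceMemoryFootprintMask = 1 << 0,
      kAbortIncrementalMarkingMask = 1 << 1,
      kMakeHeapIterableMask = 1 << 2,
    };

    class GCFlags {
     public:
      // Implicit on purpose so a single GCFlag converts at call sites, the way
      // CollectAllGarbage("reason", kNoGCFlags) reads in the patch.
      GCFlags(GCFlag flag) : bits_(flag) {}
      GCFlags operator|(GCFlags other) const {
        return GCFlags(bits_ | other.bits_);
      }
      bool Contains(GCFlag flag) const { return (bits_ & flag) != 0; }

     private:
      explicit GCFlags(int bits) : bits_(bits) {}
      int bits_;
    };

    // This free operator is what the DEFINE_OPERATORS_FOR_FLAGS macro
    // effectively provides: OR-ing two enum values yields the typed flag set.
    inline GCFlags operator|(GCFlag lhs, GCFlag rhs) {
      return GCFlags(lhs) | GCFlags(rhs);
    }

    // Mock of the reordered entry point: reason string first, flags second.
    void CollectAllGarbage(const char* gc_reason, GCFlags flags) {
      std::printf("GC (%s), reduce footprint: %d\n", gc_reason,
                  flags.Contains(kReduceMemoryFootprintMask));
    }

    int main() {
      // Call shapes taken from this diff (MakeHeapIterable and ReserveSpace).
      CollectAllGarbage("Heap::MakeHeapIterable", kMakeHeapIterableMask);
      CollectAllGarbage(
          "failed to reserve space in paged or large object space, "
          "trying to reduce memory footprint",
          kReduceMemoryFootprintMask | kAbortIncrementalMarkingMask);
      return 0;
    }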