OLD | NEW |
---|---|
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/heap/heap.h" | 5 #include "src/heap/heap.h" |
6 | 6 |
7 #include "src/accessors.h" | 7 #include "src/accessors.h" |
8 #include "src/api.h" | 8 #include "src/api.h" |
9 #include "src/base/bits.h" | 9 #include "src/base/bits.h" |
10 #include "src/base/once.h" | 10 #include "src/base/once.h" |
(...skipping 31 matching lines...) | |
42 namespace internal { | 42 namespace internal { |
43 | 43 |
44 | 44 |
45 struct Heap::StrongRootsList { | 45 struct Heap::StrongRootsList { |
46 Object** start; | 46 Object** start; |
47 Object** end; | 47 Object** end; |
48 StrongRootsList* next; | 48 StrongRootsList* next; |
49 }; | 49 }; |
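StrongRootsList is a singly linked list of half-open pointer ranges [start, end) that clients can register as additional strong GC roots. As a rough sketch (the helper name is hypothetical; the visitor is assumed to expose the usual VisitPointers range callback), walking the list amounts to:

    // Visit every slot in every registered strong-root range.
    void VisitStrongRootsList(Heap::StrongRootsList* list, ObjectVisitor* v) {
      for (Heap::StrongRootsList* current = list; current != NULL;
           current = current->next) {
        v->VisitPointers(current->start, current->end);  // [start, end)
      }
    }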
50 | 50 |
51 | 51 |
| 52 DEFINE_OPERATORS_FOR_FLAGS(Heap::GCFlags); |
| 53 |
| 54 |
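The added DEFINE_OPERATORS_FOR_FLAGS(Heap::GCFlags) generates type-safe bitwise operators for the new GCFlags type, so combining masks yields a GCFlags value rather than a raw int (the reordered CollectAllGarbage signatures below rely on this). A self-contained sketch of the idea, with illustrative names and bit values, assuming a base::Flags-style wrapper:

    enum GCFlag {
      kNoGCFlags = 0,
      kReduceMemoryFootprintMask = 1 << 0,
      kAbortIncrementalMarkingMask = 1 << 1,  // bit values illustrative
    };

    class GCFlags {
     public:
      GCFlags(GCFlag flag) : bits_(flag) {}  // NOLINT(runtime/explicit)
      explicit GCFlags(int bits) : bits_(bits) {}
      int bits() const { return bits_; }
     private:
      int bits_;
    };

    // Roughly what DEFINE_OPERATORS_FOR_FLAGS(GCFlags) would expand to:
    inline GCFlags operator|(GCFlag lhs, GCFlag rhs) {
      return GCFlags(static_cast<int>(lhs) | static_cast<int>(rhs));
    }
    inline GCFlags operator|(GCFlags lhs, GCFlag rhs) {
      return GCFlags(lhs.bits() | static_cast<int>(rhs));
    }

Because operator|(GCFlag, GCFlag) is an exact match, it beats the built-in int | int overload, so flag combinations keep their type instead of decaying to int.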
52 Heap::Heap() | 55 Heap::Heap() |
53 : amount_of_external_allocated_memory_(0), | 56 : amount_of_external_allocated_memory_(0), |
54 amount_of_external_allocated_memory_at_last_global_gc_(0), | 57 amount_of_external_allocated_memory_at_last_global_gc_(0), |
55 isolate_(NULL), | 58 isolate_(NULL), |
56 code_range_size_(0), | 59 code_range_size_(0), |
57 // semispace_size_ should be a power of 2 and old_generation_size_ should | 60 // semispace_size_ should be a power of 2 and old_generation_size_ should |
58 // be a multiple of Page::kPageSize. | 61 // be a multiple of Page::kPageSize. |
59 reserved_semispace_size_(8 * (kPointerSize / 4) * MB), | 62 reserved_semispace_size_(8 * (kPointerSize / 4) * MB), |
60 max_semi_space_size_(8 * (kPointerSize / 4) * MB), | 63 max_semi_space_size_(8 * (kPointerSize / 4) * MB), |
61 initial_semispace_size_(Page::kPageSize), | 64 initial_semispace_size_(Page::kPageSize), |
(...skipping 63 matching lines...) | |
125 crankshaft_codegen_bytes_generated_(0), | 128 crankshaft_codegen_bytes_generated_(0), |
126 new_space_allocation_counter_(0), | 129 new_space_allocation_counter_(0), |
127 old_generation_allocation_counter_(0), | 130 old_generation_allocation_counter_(0), |
128 old_generation_size_at_last_gc_(0), | 131 old_generation_size_at_last_gc_(0), |
129 gcs_since_last_deopt_(0), | 132 gcs_since_last_deopt_(0), |
130 allocation_sites_scratchpad_length_(0), | 133 allocation_sites_scratchpad_length_(0), |
131 ring_buffer_full_(false), | 134 ring_buffer_full_(false), |
132 ring_buffer_end_(0), | 135 ring_buffer_end_(0), |
133 promotion_queue_(this), | 136 promotion_queue_(this), |
134 configured_(false), | 137 configured_(false), |
135 current_gc_flags_(Heap::kNoGCFlags), | 138 current_gc_flags_(kNoGCFlags), |
136 current_gc_callback_flags_(GCCallbackFlags::kNoGCCallbackFlags), | 139 current_gc_callback_flags_(GCCallbackFlags::kNoGCCallbackFlags), |
137 external_string_table_(this), | 140 external_string_table_(this), |
138 chunks_queued_for_free_(NULL), | 141 chunks_queued_for_free_(NULL), |
139 pending_unmap_job_semaphore_(0), | 142 pending_unmap_job_semaphore_(0), |
140 gc_callbacks_depth_(0), | 143 gc_callbacks_depth_(0), |
141 deserialization_complete_(false), | 144 deserialization_complete_(false), |
142 concurrent_sweeping_enabled_(false), | 145 concurrent_sweeping_enabled_(false), |
143 strong_roots_list_(NULL) { | 146 strong_roots_list_(NULL) { |
144 // Allow build-time customization of the max semispace size. Building | 147 // Allow build-time customization of the max semispace size. Building |
145 // V8 with snapshots and a non-default max semispace size is much | 148 // V8 with snapshots and a non-default max semispace size is much |
(...skipping 589 matching lines...) | |
735 } | 738 } |
736 // We must not compact the weak fixed list here, as we may be in the middle | 739 // We must not compact the weak fixed list here, as we may be in the middle |
737 // of writing to it when the GC was triggered. Instead, we reset the root value. | 740 // of writing to it when the GC was triggered. Instead, we reset the root value. |
738 set_weak_stack_trace_list(Smi::FromInt(0)); | 741 set_weak_stack_trace_list(Smi::FromInt(0)); |
739 } | 742 } |
740 | 743 |
741 | 744 |
742 void Heap::HandleGCRequest() { | 745 void Heap::HandleGCRequest() { |
743 if (incremental_marking()->request_type() == | 746 if (incremental_marking()->request_type() == |
744 IncrementalMarking::COMPLETE_MARKING) { | 747 IncrementalMarking::COMPLETE_MARKING) { |
745 CollectAllGarbage(current_gc_flags_, "GC interrupt", | 748 CollectAllGarbage("GC interrupt", current_gc_flags_, |
746 current_gc_callback_flags_); | 749 current_gc_callback_flags_); |
747 return; | 750 return; |
748 } | 751 } |
749 DCHECK(FLAG_overapproximate_weak_closure); | 752 DCHECK(FLAG_overapproximate_weak_closure); |
750 if (!incremental_marking()->weak_closure_was_overapproximated()) { | 753 if (!incremental_marking()->weak_closure_was_overapproximated()) { |
751 OverApproximateWeakClosure("GC interrupt"); | 754 OverApproximateWeakClosure("GC interrupt"); |
752 } | 755 } |
753 } | 756 } |
754 | 757 |
755 | 758 |
(...skipping 23 matching lines...) | |
779 AllowHeapAllocation allow_allocation; | 782 AllowHeapAllocation allow_allocation; |
780 GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL); | 783 GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL); |
781 VMState<EXTERNAL> state(isolate_); | 784 VMState<EXTERNAL> state(isolate_); |
782 HandleScope handle_scope(isolate_); | 785 HandleScope handle_scope(isolate_); |
783 CallGCEpilogueCallbacks(kGCTypeIncrementalMarking, kNoGCCallbackFlags); | 786 CallGCEpilogueCallbacks(kGCTypeIncrementalMarking, kNoGCCallbackFlags); |
784 } | 787 } |
785 } | 788 } |
786 } | 789 } |
787 | 790 |
788 | 791 |
789 void Heap::CollectAllGarbage(int flags, const char* gc_reason, | 792 void Heap::CollectAllGarbage(const char* gc_reason, const GCFlags flags, |
790 const v8::GCCallbackFlags gc_callback_flags) { | 793 const v8::GCCallbackFlags gc_callback_flags) { |
791 // Since we are ignoring the return value, the exact choice of space does | 794 // Since we are ignoring the return value, the exact choice of space does |
792 // not matter, so long as we do not specify NEW_SPACE, which would not | 795 // not matter, so long as we do not specify NEW_SPACE, which would not |
793 // cause a full GC. | 796 // cause a full GC. |
794 set_current_gc_flags(flags); | 797 CollectGarbage(OLD_SPACE, gc_reason, flags, gc_callback_flags); |
795 CollectGarbage(OLD_SPACE, gc_reason, gc_callback_flags); | |
796 set_current_gc_flags(kNoGCFlags); | |
797 } | 798 } |
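The reordering also drops the set_current_gc_flags()/reset dance at this level: the flags now travel through CollectGarbage as an argument. A migrated call site then reads (reason string illustrative; the callback-flags parameter is assumed to keep a default in the header, as the two-argument calls elsewhere in this CL suggest):

    heap->CollectAllGarbage("low memory notification",
                            Heap::kReduceMemoryFootprintMask);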
798 | 799 |
799 | 800 |
800 void Heap::CollectAllAvailableGarbage(const char* gc_reason) { | 801 void Heap::CollectAllAvailableGarbage(const char* gc_reason) { |
801 // Since we are ignoring the return value, the exact choice of space does | 802 // Since we are ignoring the return value, the exact choice of space does |
802 // not matter, so long as we do not specify NEW_SPACE, which would not | 803 // not matter, so long as we do not specify NEW_SPACE, which would not |
803 // cause a full GC. | 804 // cause a full GC. |
804 // Major GC would invoke weak handle callbacks on weakly reachable | 805 // Major GC would invoke weak handle callbacks on weakly reachable |
805 // handles, but won't collect weakly reachable objects until next | 806 // handles, but won't collect weakly reachable objects until next |
806 // major GC. Therefore if we collect aggressively and weak handle callback | 807 // major GC. Therefore if we collect aggressively and weak handle callback |
807 // has been invoked, we rerun major GC to release objects which become | 808 // has been invoked, we rerun major GC to release objects which become |
808 // garbage. | 809 // garbage. |
809 // Note: as weak callbacks can execute arbitrary code, we cannot | 810 // Note: as weak callbacks can execute arbitrary code, we cannot |
810 // hope that eventually there will be no weak callback invocations. | 811 // hope that eventually there will be no weak callback invocations. |
811 // Therefore stop recollecting after several attempts. | 812 // Therefore stop recollecting after several attempts. |
812 if (isolate()->concurrent_recompilation_enabled()) { | 813 if (isolate()->concurrent_recompilation_enabled()) { |
813 // The optimizing compiler may be unnecessarily holding on to memory. | 814 // The optimizing compiler may be unnecessarily holding on to memory. |
814 DisallowHeapAllocation no_recursive_gc; | 815 DisallowHeapAllocation no_recursive_gc; |
815 isolate()->optimizing_compile_dispatcher()->Flush(); | 816 isolate()->optimizing_compile_dispatcher()->Flush(); |
816 } | 817 } |
817 isolate()->ClearSerializerData(); | 818 isolate()->ClearSerializerData(); |
818 set_current_gc_flags(kMakeHeapIterableMask | kReduceMemoryFootprintMask); | 819 isolate()->compilation_cache()->Clear(); |
819 isolate_->compilation_cache()->Clear(); | |
820 const int kMaxNumberOfAttempts = 7; | 820 const int kMaxNumberOfAttempts = 7; |
821 const int kMinNumberOfAttempts = 2; | 821 const int kMinNumberOfAttempts = 2; |
822 for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) { | 822 for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) { |
823 if (!CollectGarbage(MARK_COMPACTOR, gc_reason, NULL, | 823 if (!CollectGarbage( |
824 v8::kGCCallbackFlagForced) && | 824 OLD_SPACE, gc_reason, |
825 attempt + 1 >= kMinNumberOfAttempts) { | 825 Heap::kMakeHeapIterableMask | Heap::kReduceMemoryFootprintMask, |
Hannes Payer (out of office) 2015/08/25 16:55:22
Let's rename this instance to kAbortIncrementalMarkingMask.
Michael Lippautz 2015/08/25 17:13:49
Done.
826 kGCCallbackFlagForced) && | |
827 ((attempt + 1) >= kMinNumberOfAttempts)) { | |
826 break; | 828 break; |
827 } | 829 } |
828 } | 830 } |
829 set_current_gc_flags(kNoGCFlags); | |
830 new_space_.Shrink(); | 831 new_space_.Shrink(); |
831 UncommitFromSpace(); | 832 UncommitFromSpace(); |
832 } | 833 } |
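The loop above is a bounded retry: each full GC may run weak callbacks that release further objects, so collection repeats while progress seems likely, but at most kMaxNumberOfAttempts times and at least kMinNumberOfAttempts times. Stripped of heap details (RunFullGC is a hypothetical stand-in for the CollectGarbage call):

    const int kMaxNumberOfAttempts = 7;
    const int kMinNumberOfAttempts = 2;
    for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
      // RunFullGC() returns true when the next GC is likely to free more
      // memory, e.g. because weak callbacks ran and dropped references.
      if (!RunFullGC() && (attempt + 1) >= kMinNumberOfAttempts) break;
    }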
833 | 834 |
834 | 835 |
835 void Heap::EnsureFillerObjectAtTop() { | 836 void Heap::EnsureFillerObjectAtTop() { |
836 // There may be an allocation memento behind every object in new space. | 837 // There may be an allocation memento behind every object in new space. |
837 // If we evacuate a not-yet-full new space or if we are on the last page of | 838 // If we evacuate a not-yet-full new space or if we are on the last page of |
838 // the new space, then there may be uninitialized memory behind the top | 839 // the new space, then there may be uninitialized memory behind the top |
839 // pointer of the new space page. We store a filler object there to | 840 // pointer of the new space page. We store a filler object there to |
840 // identify the unused space. | 841 // identify the unused space. |
841 Address from_top = new_space_.top(); | 842 Address from_top = new_space_.top(); |
842 // Check that from_top is inside its page (i.e., not at the end). | 843 // Check that from_top is inside its page (i.e., not at the end). |
843 Address space_end = new_space_.ToSpaceEnd(); | 844 Address space_end = new_space_.ToSpaceEnd(); |
844 if (from_top < space_end) { | 845 if (from_top < space_end) { |
845 Page* page = Page::FromAddress(from_top); | 846 Page* page = Page::FromAddress(from_top); |
846 if (page->Contains(from_top)) { | 847 if (page->Contains(from_top)) { |
847 int remaining_in_page = static_cast<int>(page->area_end() - from_top); | 848 int remaining_in_page = static_cast<int>(page->area_end() - from_top); |
848 CreateFillerObjectAt(from_top, remaining_in_page); | 849 CreateFillerObjectAt(from_top, remaining_in_page); |
849 } | 850 } |
850 } | 851 } |
851 } | 852 } |
852 | 853 |
853 | 854 |
854 bool Heap::CollectGarbage(GarbageCollector collector, const char* gc_reason, | 855 bool Heap::CollectGarbage(GarbageCollector collector, const char* gc_reason, |
855 const char* collector_reason, | 856 const char* collector_reason) { |
856 const v8::GCCallbackFlags gc_callback_flags) { | |
857 // The VM is in the GC state until exiting this function. | 857 // The VM is in the GC state until exiting this function. |
858 VMState<GC> state(isolate_); | 858 VMState<GC> state(isolate_); |
859 | 859 |
860 #ifdef DEBUG | 860 #ifdef DEBUG |
861 // Reset the allocation timeout to the GC interval, but make sure to | 861 // Reset the allocation timeout to the GC interval, but make sure to |
862 // allow at least a few allocations after a collection. The reason | 862 // allow at least a few allocations after a collection. The reason |
863 // for this is that we have a lot of allocation sequences and we | 863 // for this is that we have a lot of allocation sequences and we |
864 // assume that a garbage collection will allow the subsequent | 864 // assume that a garbage collection will allow the subsequent |
865 // allocation attempts to go through. | 865 // allocation attempts to go through. |
866 allocation_timeout_ = Max(6, FLAG_gc_interval); | 866 allocation_timeout_ = Max(6, FLAG_gc_interval); |
(...skipping 34 matching lines...) | |
901 { | 901 { |
902 tracer()->Start(collector, gc_reason, collector_reason); | 902 tracer()->Start(collector, gc_reason, collector_reason); |
903 DCHECK(AllowHeapAllocation::IsAllowed()); | 903 DCHECK(AllowHeapAllocation::IsAllowed()); |
904 DisallowHeapAllocation no_allocation_during_gc; | 904 DisallowHeapAllocation no_allocation_during_gc; |
905 GarbageCollectionPrologue(); | 905 GarbageCollectionPrologue(); |
906 | 906 |
907 { | 907 { |
908 HistogramTimerScope histogram_timer_scope( | 908 HistogramTimerScope histogram_timer_scope( |
909 (collector == SCAVENGER) ? isolate_->counters()->gc_scavenger() | 909 (collector == SCAVENGER) ? isolate_->counters()->gc_scavenger() |
910 : isolate_->counters()->gc_compactor()); | 910 : isolate_->counters()->gc_compactor()); |
911 next_gc_likely_to_collect_more = | 911 next_gc_likely_to_collect_more = PerformGarbageCollection(collector); |
912 PerformGarbageCollection(collector, gc_callback_flags); | |
913 } | 912 } |
914 | 913 |
915 GarbageCollectionEpilogue(); | 914 GarbageCollectionEpilogue(); |
916 if (collector == MARK_COMPACTOR && FLAG_track_detached_contexts) { | 915 if (collector == MARK_COMPACTOR && FLAG_track_detached_contexts) { |
917 isolate()->CheckDetachedContextsAfterGC(); | 916 isolate()->CheckDetachedContextsAfterGC(); |
918 } | 917 } |
919 | 918 |
920 if (collector == MARK_COMPACTOR) { | 919 if (collector == MARK_COMPACTOR) { |
921 intptr_t committed_memory_after = CommittedOldGenerationMemory(); | 920 intptr_t committed_memory_after = CommittedOldGenerationMemory(); |
922 intptr_t used_memory_after = PromotedSpaceSizeOfObjects(); | 921 intptr_t used_memory_after = PromotedSpaceSizeOfObjects(); |
(...skipping 10 matching lines...) | |
933 (detached_contexts()->length() > 0); | 932 (detached_contexts()->length() > 0); |
934 if (deserialization_complete_) { | 933 if (deserialization_complete_) { |
935 memory_reducer_->NotifyMarkCompact(event); | 934 memory_reducer_->NotifyMarkCompact(event); |
936 } | 935 } |
937 } | 936 } |
938 | 937 |
939 tracer()->Stop(collector); | 938 tracer()->Stop(collector); |
940 } | 939 } |
941 | 940 |
942 if (collector == MARK_COMPACTOR && | 941 if (collector == MARK_COMPACTOR && |
943 (gc_callback_flags & kGCCallbackFlagForced) != 0) { | 942 (current_gc_callback_flags_ & kGCCallbackFlagForced) != 0) { |
944 isolate()->CountUsage(v8::Isolate::kForcedGC); | 943 isolate()->CountUsage(v8::Isolate::kForcedGC); |
945 } | 944 } |
946 | 945 |
947 // Start incremental marking for the next cycle. The heap snapshot | 946 // Start incremental marking for the next cycle. The heap snapshot |
948 // generator needs incremental marking to stay off after it aborted. | 947 // generator needs incremental marking to stay off after it aborted. |
949 if (!ShouldAbortIncrementalMarking() && incremental_marking()->IsStopped() && | 948 if (!ShouldAbortIncrementalMarking() && incremental_marking()->IsStopped() && |
950 incremental_marking()->ShouldActivateEvenWithoutIdleNotification()) { | 949 incremental_marking()->ShouldActivateEvenWithoutIdleNotification()) { |
951 StartIncrementalMarking(kNoGCFlags, kNoGCCallbackFlags, "GC epilogue"); | 950 StartIncrementalMarking(kNoGCFlags, kNoGCCallbackFlags, "GC epilogue"); |
952 } | 951 } |
953 | 952 |
(...skipping 14 matching lines...) | |
968 set_retained_maps(ArrayList::cast(empty_fixed_array())); | 967 set_retained_maps(ArrayList::cast(empty_fixed_array())); |
969 tracer()->AddContextDisposalTime(base::OS::TimeCurrentMillis()); | 968 tracer()->AddContextDisposalTime(base::OS::TimeCurrentMillis()); |
970 MemoryReducer::Event event; | 969 MemoryReducer::Event event; |
971 event.type = MemoryReducer::kContextDisposed; | 970 event.type = MemoryReducer::kContextDisposed; |
972 event.time_ms = MonotonicallyIncreasingTimeInMs(); | 971 event.time_ms = MonotonicallyIncreasingTimeInMs(); |
973 memory_reducer_->NotifyContextDisposed(event); | 972 memory_reducer_->NotifyContextDisposed(event); |
974 return ++contexts_disposed_; | 973 return ++contexts_disposed_; |
975 } | 974 } |
976 | 975 |
977 | 976 |
978 void Heap::StartIncrementalMarking(int gc_flags, | 977 void Heap::StartIncrementalMarking(const GCFlags gc_flags, |
979 const GCCallbackFlags gc_callback_flags, | 978 const GCCallbackFlags gc_callback_flags, |
980 const char* reason) { | 979 const char* reason) { |
981 DCHECK(incremental_marking()->IsStopped()); | 980 DCHECK(incremental_marking()->IsStopped()); |
982 set_current_gc_flags(gc_flags); | 981 set_current_gc_flags(gc_flags); |
983 current_gc_callback_flags_ = gc_callback_flags; | 982 current_gc_callback_flags_ = gc_callback_flags; |
984 incremental_marking()->Start(reason); | 983 incremental_marking()->Start(reason); |
985 } | 984 } |
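With the typed signature, a caller requesting an incremental cycle that should also shrink the heap would look roughly like this (reason string illustrative):

    heap->StartIncrementalMarking(Heap::kReduceMemoryFootprintMask,
                                  kNoGCCallbackFlags,
                                  "idle notification: start marking");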
986 | 985 |
987 | 986 |
988 void Heap::StartIdleIncrementalMarking() { | 987 void Heap::StartIdleIncrementalMarking() { |
(...skipping 81 matching lines...) | |
1070 chunk.start = free_space_address; | 1069 chunk.start = free_space_address; |
1071 chunk.end = free_space_address + size; | 1070 chunk.end = free_space_address + size; |
1072 } else { | 1071 } else { |
1073 perform_gc = true; | 1072 perform_gc = true; |
1074 break; | 1073 break; |
1075 } | 1074 } |
1076 } | 1075 } |
1077 } | 1076 } |
1078 if (perform_gc) { | 1077 if (perform_gc) { |
1079 if (space == NEW_SPACE) { | 1078 if (space == NEW_SPACE) { |
1080 CollectGarbage(NEW_SPACE, "failed to reserve space in the new space"); | 1079 CollectGarbage(NEW_SPACE, "failed to reserve space in the new space", |
| 1080 kNoGCFlags, kNoGCCallbackFlags); |
1081 } else { | 1081 } else { |
1082 if (counter > 1) { | 1082 if (counter > 1) { |
1083 CollectAllGarbage( | 1083 CollectAllGarbage( |
1084 kReduceMemoryFootprintMask | kAbortIncrementalMarkingMask, | |
1085 "failed to reserve space in paged or large " | 1084 "failed to reserve space in paged or large " |
1086 "object space, trying to reduce memory footprint"); | 1085 "object space, trying to reduce memory footprint", |
| 1086 kReduceMemoryFootprintMask | kAbortIncrementalMarkingMask); |
1087 } else { | 1087 } else { |
1088 CollectAllGarbage( | 1088 CollectAllGarbage( |
1089 kAbortIncrementalMarkingMask, | 1089 "failed to reserve space in paged or large object space", |
1090 "failed to reserve space in paged or large object space"); | 1090 kAbortIncrementalMarkingMask); |
1091 } | 1091 } |
1092 } | 1092 } |
1093 gc_performed = true; | 1093 gc_performed = true; |
1094 break; // Abort for-loop over spaces and retry. | 1094 break; // Abort for-loop over spaces and retry. |
1095 } | 1095 } |
1096 } | 1096 } |
1097 } | 1097 } |
1098 | 1098 |
1099 return !gc_performed; | 1099 return !gc_performed; |
1100 } | 1100 } |
(...skipping 48 matching lines...) | |
1149 | 1149 |
1150 double survival_rate = promotion_ratio_ + semi_space_copied_rate_; | 1150 double survival_rate = promotion_ratio_ + semi_space_copied_rate_; |
1151 tracer()->AddSurvivalRatio(survival_rate); | 1151 tracer()->AddSurvivalRatio(survival_rate); |
1152 if (survival_rate > kYoungSurvivalRateHighThreshold) { | 1152 if (survival_rate > kYoungSurvivalRateHighThreshold) { |
1153 high_survival_rate_period_length_++; | 1153 high_survival_rate_period_length_++; |
1154 } else { | 1154 } else { |
1155 high_survival_rate_period_length_ = 0; | 1155 high_survival_rate_period_length_ = 0; |
1156 } | 1156 } |
1157 } | 1157 } |
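The survival rate is simply the sum of two percentages from the last scavenge: promotion_ratio_ (share of surviving new-space bytes promoted to old space) and semi_space_copied_rate_ (share copied within the semispaces). With hypothetical numbers: promoting 30% and copying 25% gives survival_rate = 30.0 + 25.0 = 55.0, which extends the high-survival period counter only if it exceeds kYoungSurvivalRateHighThreshold.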
1158 | 1158 |
1159 bool Heap::PerformGarbageCollection( | 1159 |
1160 GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags) { | 1160 bool Heap::PerformGarbageCollection(GarbageCollector collector) { |
1161 int freed_global_handles = 0; | 1161 int freed_global_handles = 0; |
1162 | 1162 |
1163 if (collector != SCAVENGER) { | 1163 if (collector != SCAVENGER) { |
1164 PROFILE(isolate_, CodeMovingGCEvent()); | 1164 PROFILE(isolate_, CodeMovingGCEvent()); |
1165 } | 1165 } |
1166 | 1166 |
1167 #ifdef VERIFY_HEAP | 1167 #ifdef VERIFY_HEAP |
1168 if (FLAG_verify_heap) { | 1168 if (FLAG_verify_heap) { |
1169 VerifyStringTable(this); | 1169 VerifyStringTable(this); |
1170 } | 1170 } |
(...skipping 56 matching lines...) | |
1227 mark_compact_collector_.EnsureMarkingDequeIsCommitted( | 1227 mark_compact_collector_.EnsureMarkingDequeIsCommitted( |
1228 MarkCompactCollector::kMinMarkingDequeSize); | 1228 MarkCompactCollector::kMinMarkingDequeSize); |
1229 } | 1229 } |
1230 | 1230 |
1231 gc_post_processing_depth_++; | 1231 gc_post_processing_depth_++; |
1232 { | 1232 { |
1233 AllowHeapAllocation allow_allocation; | 1233 AllowHeapAllocation allow_allocation; |
1234 GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL); | 1234 GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL); |
1235 freed_global_handles = | 1235 freed_global_handles = |
1236 isolate_->global_handles()->PostGarbageCollectionProcessing( | 1236 isolate_->global_handles()->PostGarbageCollectionProcessing( |
1237 collector, gc_callback_flags); | 1237 collector, current_gc_callback_flags_); |
1238 } | 1238 } |
1239 gc_post_processing_depth_--; | 1239 gc_post_processing_depth_--; |
1240 | 1240 |
1241 isolate_->eternal_handles()->PostGarbageCollectionProcessing(this); | 1241 isolate_->eternal_handles()->PostGarbageCollectionProcessing(this); |
1242 | 1242 |
1243 // Update relocatables. | 1243 // Update relocatables. |
1244 Relocatable::PostGarbageCollectionProcessing(isolate_); | 1244 Relocatable::PostGarbageCollectionProcessing(isolate_); |
1245 | 1245 |
1246 double gc_speed = tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond(); | 1246 double gc_speed = tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond(); |
1247 double mutator_speed = static_cast<double>( | 1247 double mutator_speed = static_cast<double>( |
(...skipping 10 matching lines...) | |
1258 DampenOldGenerationAllocationLimit(old_gen_size, gc_speed, mutator_speed); | 1258 DampenOldGenerationAllocationLimit(old_gen_size, gc_speed, mutator_speed); |
1259 } | 1259 } |
1260 | 1260 |
1261 { | 1261 { |
1262 GCCallbacksScope scope(this); | 1262 GCCallbacksScope scope(this); |
1263 if (scope.CheckReenter()) { | 1263 if (scope.CheckReenter()) { |
1264 AllowHeapAllocation allow_allocation; | 1264 AllowHeapAllocation allow_allocation; |
1265 GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL); | 1265 GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL); |
1266 VMState<EXTERNAL> state(isolate_); | 1266 VMState<EXTERNAL> state(isolate_); |
1267 HandleScope handle_scope(isolate_); | 1267 HandleScope handle_scope(isolate_); |
1268 CallGCEpilogueCallbacks(gc_type, gc_callback_flags); | 1268 CallGCEpilogueCallbacks(gc_type, current_gc_callback_flags_); |
1269 } | 1269 } |
1270 } | 1270 } |
1271 | 1271 |
1272 #ifdef VERIFY_HEAP | 1272 #ifdef VERIFY_HEAP |
1273 if (FLAG_verify_heap) { | 1273 if (FLAG_verify_heap) { |
1274 VerifyStringTable(this); | 1274 VerifyStringTable(this); |
1275 } | 1275 } |
1276 #endif | 1276 #endif |
1277 | 1277 |
1278 return freed_global_handles > 0; | 1278 return freed_global_handles > 0; |
(...skipping 3228 matching lines...) | |
4507 bool Heap::IsHeapIterable() { | 4507 bool Heap::IsHeapIterable() { |
4508 // TODO(hpayer): This function is not correct. Allocation folding in old | 4508 // TODO(hpayer): This function is not correct. Allocation folding in old |
4509 // space breaks the iterability. | 4509 // space breaks the iterability. |
4510 return new_space_top_after_last_gc_ == new_space()->top(); | 4510 return new_space_top_after_last_gc_ == new_space()->top(); |
4511 } | 4511 } |
4512 | 4512 |
4513 | 4513 |
4514 void Heap::MakeHeapIterable() { | 4514 void Heap::MakeHeapIterable() { |
4515 DCHECK(AllowHeapAllocation::IsAllowed()); | 4515 DCHECK(AllowHeapAllocation::IsAllowed()); |
4516 if (!IsHeapIterable()) { | 4516 if (!IsHeapIterable()) { |
4517 CollectAllGarbage(kMakeHeapIterableMask, "Heap::MakeHeapIterable"); | 4517 CollectAllGarbage("Heap::MakeHeapIterable", kMakeHeapIterableMask); |
4518 } | 4518 } |
4519 if (mark_compact_collector()->sweeping_in_progress()) { | 4519 if (mark_compact_collector()->sweeping_in_progress()) { |
4520 mark_compact_collector()->EnsureSweepingCompleted(); | 4520 mark_compact_collector()->EnsureSweepingCompleted(); |
4521 } | 4521 } |
4522 DCHECK(IsHeapIterable()); | 4522 DCHECK(IsHeapIterable()); |
4523 } | 4523 } |
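MakeHeapIterable is the usual prelude for code that must visit every object, such as the heap snapshot generator. A usage sketch (iterator spelling per the 2015-era API; treat the exact names as an assumption):

    heap->MakeHeapIterable();
    HeapIterator iterator(heap);
    for (HeapObject* obj = iterator.next(); obj != NULL;
         obj = iterator.next()) {
      // Safe to inspect obj: fillers now cover every unused gap.
    }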
4524 | 4524 |
4525 | 4525 |
4526 static double ComputeMutatorUtilization(double mutator_speed, double gc_speed) { | 4526 static double ComputeMutatorUtilization(double mutator_speed, double gc_speed) { |
4527 const double kMinMutatorUtilization = 0.0; | 4527 const double kMinMutatorUtilization = 0.0; |
(...skipping 104 matching lines...) | |
4632 gc_idle_time_handler_.ShouldDoOverApproximateWeakClosure( | 4632 gc_idle_time_handler_.ShouldDoOverApproximateWeakClosure( |
4633 static_cast<size_t>(idle_time_in_ms))))) { | 4633 static_cast<size_t>(idle_time_in_ms))))) { |
4634 OverApproximateWeakClosure( | 4634 OverApproximateWeakClosure( |
4635 "Idle notification: overapproximate weak closure"); | 4635 "Idle notification: overapproximate weak closure"); |
4636 return true; | 4636 return true; |
4637 } else if (incremental_marking()->IsComplete() || | 4637 } else if (incremental_marking()->IsComplete() || |
4638 (mark_compact_collector_.marking_deque()->IsEmpty() && | 4638 (mark_compact_collector_.marking_deque()->IsEmpty() && |
4639 gc_idle_time_handler_.ShouldDoFinalIncrementalMarkCompact( | 4639 gc_idle_time_handler_.ShouldDoFinalIncrementalMarkCompact( |
4640 static_cast<size_t>(idle_time_in_ms), size_of_objects, | 4640 static_cast<size_t>(idle_time_in_ms), size_of_objects, |
4641 final_incremental_mark_compact_speed_in_bytes_per_ms))) { | 4641 final_incremental_mark_compact_speed_in_bytes_per_ms))) { |
4642 CollectAllGarbage(current_gc_flags_, | 4642 CollectAllGarbage("idle notification: finalize incremental", |
4643 "idle notification: finalize incremental"); | 4643 current_gc_flags_); |
4644 return true; | 4644 return true; |
4645 } | 4645 } |
4646 return false; | 4646 return false; |
4647 } | 4647 } |
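The branch structure above picks the cheapest useful finalization step for the available idle time. As a comment-only summary (simplified; the predicates stand in for the real handler calls):

    // Idle-time finalization policy, roughly:
    //   weak closure not yet over-approximated, enough idle time
    //     -> OverApproximateWeakClosure(...)            // cheaper step first
    //   marking complete, or marking deque empty with enough idle time
    //     -> CollectAllGarbage(..., current_gc_flags_)  // finalize the cycle
    //   otherwise
    //     -> no additional work this idle round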
4648 | 4648 |
4649 | 4649 |
4650 GCIdleTimeHandler::HeapState Heap::ComputeHeapState() { | 4650 GCIdleTimeHandler::HeapState Heap::ComputeHeapState() { |
4651 GCIdleTimeHandler::HeapState heap_state; | 4651 GCIdleTimeHandler::HeapState heap_state; |
4652 heap_state.contexts_disposed = contexts_disposed_; | 4652 heap_state.contexts_disposed = contexts_disposed_; |
4653 heap_state.contexts_disposal_rate = | 4653 heap_state.contexts_disposal_rate = |
(...skipping 62 matching lines...) | |
4716 if (remaining_idle_time_in_ms > 0.0) { | 4716 if (remaining_idle_time_in_ms > 0.0) { |
4717 action.additional_work = TryFinalizeIdleIncrementalMarking( | 4717 action.additional_work = TryFinalizeIdleIncrementalMarking( |
4718 remaining_idle_time_in_ms, heap_state.size_of_objects, | 4718 remaining_idle_time_in_ms, heap_state.size_of_objects, |
4719 heap_state.final_incremental_mark_compact_speed_in_bytes_per_ms); | 4719 heap_state.final_incremental_mark_compact_speed_in_bytes_per_ms); |
4720 } | 4720 } |
4721 break; | 4721 break; |
4722 } | 4722 } |
4723 case DO_FULL_GC: { | 4723 case DO_FULL_GC: { |
4724 DCHECK(contexts_disposed_ > 0); | 4724 DCHECK(contexts_disposed_ > 0); |
4725 HistogramTimerScope scope(isolate_->counters()->gc_context()); | 4725 HistogramTimerScope scope(isolate_->counters()->gc_context()); |
4726 CollectAllGarbage(kNoGCFlags, "idle notification: contexts disposed"); | 4726 CollectAllGarbage("idle notification: contexts disposed", kNoGCFlags); |
4727 break; | 4727 break; |
4728 } | 4728 } |
4729 case DO_SCAVENGE: | 4729 case DO_SCAVENGE: |
4730 CollectGarbage(NEW_SPACE, "idle notification: scavenge"); | 4730 CollectGarbage(NEW_SPACE, "idle notification: scavenge", kNoGCFlags, |
| 4731 kNoGCCallbackFlags); |
4731 break; | 4732 break; |
4732 case DO_FINALIZE_SWEEPING: | 4733 case DO_FINALIZE_SWEEPING: |
4733 mark_compact_collector()->EnsureSweepingCompleted(); | 4734 mark_compact_collector()->EnsureSweepingCompleted(); |
4734 break; | 4735 break; |
4735 case DO_NOTHING: | 4736 case DO_NOTHING: |
4736 break; | 4737 break; |
4737 } | 4738 } |
4738 | 4739 |
4739 return result; | 4740 return result; |
4740 } | 4741 } |
(...skipping 2034 matching lines...) | |
6775 *object_type = "CODE_TYPE"; \ | 6776 *object_type = "CODE_TYPE"; \ |
6776 *object_sub_type = "CODE_AGE/" #name; \ | 6777 *object_sub_type = "CODE_AGE/" #name; \ |
6777 return true; | 6778 return true; |
6778 CODE_AGE_LIST_COMPLETE(COMPARE_AND_RETURN_NAME) | 6779 CODE_AGE_LIST_COMPLETE(COMPARE_AND_RETURN_NAME) |
6779 #undef COMPARE_AND_RETURN_NAME | 6780 #undef COMPARE_AND_RETURN_NAME |
6780 } | 6781 } |
6781 return false; | 6782 return false; |
6782 } | 6783 } |
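COMPARE_AND_RETURN_NAME plus CODE_AGE_LIST_COMPLETE is the classic X-macro pattern: the list macro invokes its argument once per entry, generating one comparison per code age. A minimal standalone illustration (names hypothetical):

    #define COLOR_LIST(V) V(Red) V(Green) V(Blue)

    enum Color { kRed, kGreen, kBlue };

    const char* ColorName(Color color) {
    #define COMPARE_AND_RETURN_NAME(name) \
      if (color == k##name) return #name;
      COLOR_LIST(COMPARE_AND_RETURN_NAME)
    #undef COMPARE_AND_RETURN_NAME
      return "unknown";
    }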
6783 } // namespace internal | 6784 } // namespace internal |
6784 } // namespace v8 | 6785 } // namespace v8 |