Chromium Code Reviews

Side by Side Diff: src/heap/heap.cc

Issue 1314863003: [heap] More flag cleanup. (Closed)
Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Added more comments (created 5 years, 3 months ago)
OLD | NEW
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "src/heap/heap.h" 5 #include "src/heap/heap.h"
6 6
7 #include "src/accessors.h" 7 #include "src/accessors.h"
8 #include "src/api.h" 8 #include "src/api.h"
9 #include "src/base/bits.h" 9 #include "src/base/bits.h"
10 #include "src/base/once.h" 10 #include "src/base/once.h"
(...skipping 32 matching lines...)
43 namespace internal { 43 namespace internal {
44 44
45 45
46 struct Heap::StrongRootsList { 46 struct Heap::StrongRootsList {
47 Object** start; 47 Object** start;
48 Object** end; 48 Object** end;
49 StrongRootsList* next; 49 StrongRootsList* next;
50 }; 50 };
51 51
52 52
53 DEFINE_OPERATORS_FOR_FLAGS(Heap::GCFlags)
54
55
53 Heap::Heap() 56 Heap::Heap()
54 : amount_of_external_allocated_memory_(0), 57 : amount_of_external_allocated_memory_(0),
55 amount_of_external_allocated_memory_at_last_global_gc_(0), 58 amount_of_external_allocated_memory_at_last_global_gc_(0),
56 isolate_(NULL), 59 isolate_(NULL),
57 code_range_size_(0), 60 code_range_size_(0),
58 // semispace_size_ should be a power of 2 and old_generation_size_ should 61 // semispace_size_ should be a power of 2 and old_generation_size_ should
59 // be a multiple of Page::kPageSize. 62 // be a multiple of Page::kPageSize.
60 reserved_semispace_size_(8 * (kPointerSize / 4) * MB), 63 reserved_semispace_size_(8 * (kPointerSize / 4) * MB),
61 max_semi_space_size_(8 * (kPointerSize / 4) * MB), 64 max_semi_space_size_(8 * (kPointerSize / 4) * MB),
62 initial_semispace_size_(Page::kPageSize), 65 initial_semispace_size_(Page::kPageSize),
(...skipping 63 matching lines...)
126 crankshaft_codegen_bytes_generated_(0), 129 crankshaft_codegen_bytes_generated_(0),
127 new_space_allocation_counter_(0), 130 new_space_allocation_counter_(0),
128 old_generation_allocation_counter_(0), 131 old_generation_allocation_counter_(0),
129 old_generation_size_at_last_gc_(0), 132 old_generation_size_at_last_gc_(0),
130 gcs_since_last_deopt_(0), 133 gcs_since_last_deopt_(0),
131 allocation_sites_scratchpad_length_(0), 134 allocation_sites_scratchpad_length_(0),
132 ring_buffer_full_(false), 135 ring_buffer_full_(false),
133 ring_buffer_end_(0), 136 ring_buffer_end_(0),
134 promotion_queue_(this), 137 promotion_queue_(this),
135 configured_(false), 138 configured_(false),
136 current_gc_flags_(Heap::kNoGCFlags), 139 current_gc_flags_(kNoGCFlags),
137 current_gc_callback_flags_(GCCallbackFlags::kNoGCCallbackFlags), 140 current_gc_callback_flags_(GCCallbackFlags::kNoGCCallbackFlags),
138 external_string_table_(this), 141 external_string_table_(this),
139 chunks_queued_for_free_(NULL), 142 chunks_queued_for_free_(NULL),
140 concurrent_unmapping_tasks_active_(0), 143 concurrent_unmapping_tasks_active_(0),
141 pending_unmapping_tasks_semaphore_(0), 144 pending_unmapping_tasks_semaphore_(0),
142 gc_callbacks_depth_(0), 145 gc_callbacks_depth_(0),
143 deserialization_complete_(false), 146 deserialization_complete_(false),
144 concurrent_sweeping_enabled_(false), 147 concurrent_sweeping_enabled_(false),
145 strong_roots_list_(NULL) { 148 strong_roots_list_(NULL) {
146 // Allow build-time customization of the max semispace size. Building 149 // Allow build-time customization of the max semispace size. Building
(...skipping 596 matching lines...)
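
Note on the hunk above: the new DEFINE_OPERATORS_FOR_FLAGS(Heap::GCFlags) line, together with current_gc_flags_(kNoGCFlags) in the initializer list and the GCFlags parameters introduced further down, indicates the GC flags are now a typed flag set rather than a plain int. Below is a minimal, self-contained sketch of what such a macro typically provides; the names and bit values are illustrative and this is not V8's actual base::Flags implementation.

#include <cstdint>

// Illustrative flag bits; the real values live in heap.h, not in this diff.
enum GCFlag : uint32_t {
  kNoGCFlags = 0,
  kReduceMemoryFootprintMask = 1u << 0,
  kAbortIncrementalMarkingMask = 1u << 1,
  kMakeHeapIterableMask = 1u << 2,
};

// Thin wrapper that keeps flag combinations strongly typed.
class GCFlags {
 public:
  constexpr GCFlags(GCFlag flag) : bits_(flag) {}  // a single flag converts implicitly
  constexpr explicit GCFlags(uint32_t bits) : bits_(bits) {}
  constexpr GCFlags operator|(GCFlags other) const {
    return GCFlags(bits_ | other.bits_);
  }
  constexpr bool operator&(GCFlag flag) const { return (bits_ & flag) != 0; }

 private:
  uint32_t bits_;
};

// What a DEFINE_OPERATORS_FOR_FLAGS-style macro effectively adds: OR-ing two
// raw enum values yields the wrapper type instead of a plain int.
constexpr GCFlags operator|(GCFlag lhs, GCFlag rhs) {
  return GCFlags(lhs) | GCFlags(rhs);
}

With this pattern, a call such as CollectAllGarbage(reason, kReduceMemoryFootprintMask | kAbortIncrementalMarkingMask) passes a GCFlags value rather than an untyped int, which is the cleanup this issue's title refers to.
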
743 } 746 }
744 // We must not compact the weak fixed list here, as we may be in the middle 747 // We must not compact the weak fixed list here, as we may be in the middle
745 // of writing to it, when the GC triggered. Instead, we reset the root value. 748 // of writing to it, when the GC triggered. Instead, we reset the root value.
746 set_weak_stack_trace_list(Smi::FromInt(0)); 749 set_weak_stack_trace_list(Smi::FromInt(0));
747 } 750 }
748 751
749 752
750 void Heap::HandleGCRequest() { 753 void Heap::HandleGCRequest() {
751 if (incremental_marking()->request_type() == 754 if (incremental_marking()->request_type() ==
752 IncrementalMarking::COMPLETE_MARKING) { 755 IncrementalMarking::COMPLETE_MARKING) {
753 CollectAllGarbage(current_gc_flags_, "GC interrupt", 756 CollectAllGarbage("GC interrupt", current_gc_flags_,
754 current_gc_callback_flags_); 757 current_gc_callback_flags_);
755 return; 758 return;
756 } 759 }
757 DCHECK(FLAG_overapproximate_weak_closure); 760 DCHECK(FLAG_overapproximate_weak_closure);
758 if (!incremental_marking()->weak_closure_was_overapproximated()) { 761 if (!incremental_marking()->weak_closure_was_overapproximated()) {
759 OverApproximateWeakClosure("GC interrupt"); 762 OverApproximateWeakClosure("GC interrupt");
760 } 763 }
761 } 764 }
762 765
763 766
(...skipping 23 matching lines...)
787 AllowHeapAllocation allow_allocation; 790 AllowHeapAllocation allow_allocation;
788 GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL); 791 GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
789 VMState<EXTERNAL> state(isolate_); 792 VMState<EXTERNAL> state(isolate_);
790 HandleScope handle_scope(isolate_); 793 HandleScope handle_scope(isolate_);
791 CallGCEpilogueCallbacks(kGCTypeIncrementalMarking, kNoGCCallbackFlags); 794 CallGCEpilogueCallbacks(kGCTypeIncrementalMarking, kNoGCCallbackFlags);
792 } 795 }
793 } 796 }
794 } 797 }
795 798
796 799
797 void Heap::CollectAllGarbage(int flags, const char* gc_reason, 800 void Heap::CollectAllGarbage(const char* gc_reason, const GCFlags flags,
798 const v8::GCCallbackFlags gc_callback_flags) { 801 const v8::GCCallbackFlags gc_callback_flags) {
799 // Since we are ignoring the return value, the exact choice of space does 802 // Since we are ignoring the return value, the exact choice of space does
800 // not matter, so long as we do not specify NEW_SPACE, which would not 803 // not matter, so long as we do not specify NEW_SPACE, which would not
801 // cause a full GC. 804 // cause a full GC.
802 set_current_gc_flags(flags); 805 CollectGarbage(OLD_SPACE, gc_reason, flags, gc_callback_flags);
803 CollectGarbage(OLD_SPACE, gc_reason, gc_callback_flags);
804 set_current_gc_flags(kNoGCFlags);
805 } 806 }
806 807
807 808
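
Reviewer-facing summary of the CollectAllGarbage change above: the reason string now comes first and the typed flags second, so call sites that need no special flags can omit them if a default argument is provided in heap.h (heap.h is not part of this file's diff, so the defaults below are an assumption). A toy, self-contained illustration of the new call shape:

#include <cstdio>

// Simplified stand-ins; the real GCFlags type and any default arguments are
// declared in heap.h, which is not shown in this diff.
using GCFlags = unsigned;
constexpr GCFlags kNoGCFlags = 0;
constexpr GCFlags kReduceMemoryFootprintMask = 1u << 0;

// New argument order: reason first, flags second (assumed to be defaulted).
void CollectAllGarbage(const char* gc_reason, GCFlags flags = kNoGCFlags) {
  std::printf("full GC: %s (flags=0x%x)\n", gc_reason, flags);
}

int main() {
  CollectAllGarbage("testing");  // the common case needs no flag argument
  CollectAllGarbage("low memory notification", kReduceMemoryFootprintMask);
  return 0;
}
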
808 void Heap::CollectAllAvailableGarbage(const char* gc_reason) { 809 void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
809 // Since we are ignoring the return value, the exact choice of space does 810 // Since we are ignoring the return value, the exact choice of space does
810 // not matter, so long as we do not specify NEW_SPACE, which would not 811 // not matter, so long as we do not specify NEW_SPACE, which would not
811 // cause a full GC. 812 // cause a full GC.
812 // Major GC would invoke weak handle callbacks on weakly reachable 813 // Major GC would invoke weak handle callbacks on weakly reachable
813 // handles, but won't collect weakly reachable objects until next 814 // handles, but won't collect weakly reachable objects until next
814 // major GC. Therefore if we collect aggressively and weak handle callback 815 // major GC. Therefore if we collect aggressively and weak handle callback
815 // has been invoked, we rerun major GC to release objects which become 816 // has been invoked, we rerun major GC to release objects which become
816 // garbage. 817 // garbage.
817 // Note: as weak callbacks can execute arbitrary code, we cannot 818 // Note: as weak callbacks can execute arbitrary code, we cannot
818 // hope that eventually there will be no weak callbacks invocations. 819 // hope that eventually there will be no weak callbacks invocations.
819 // Therefore stop recollecting after several attempts. 820 // Therefore stop recollecting after several attempts.
820 if (isolate()->concurrent_recompilation_enabled()) { 821 if (isolate()->concurrent_recompilation_enabled()) {
821 // The optimizing compiler may be unnecessarily holding on to memory. 822 // The optimizing compiler may be unnecessarily holding on to memory.
822 DisallowHeapAllocation no_recursive_gc; 823 DisallowHeapAllocation no_recursive_gc;
823 isolate()->optimizing_compile_dispatcher()->Flush(); 824 isolate()->optimizing_compile_dispatcher()->Flush();
824 } 825 }
825 isolate()->ClearSerializerData(); 826 isolate()->ClearSerializerData();
826 set_current_gc_flags(kMakeHeapIterableMask | kReduceMemoryFootprintMask); 827 isolate()->compilation_cache()->Clear();
827 isolate_->compilation_cache()->Clear();
828 const int kMaxNumberOfAttempts = 7; 828 const int kMaxNumberOfAttempts = 7;
829 const int kMinNumberOfAttempts = 2; 829 const int kMinNumberOfAttempts = 2;
830 for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) { 830 for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
831 if (!CollectGarbage(MARK_COMPACTOR, gc_reason, NULL, 831 if (!CollectGarbage(OLD_SPACE, gc_reason,
832 v8::kGCCallbackFlagForced) && 832 Heap::kAbortIncrementalMarkingMask |
833 attempt + 1 >= kMinNumberOfAttempts) { 833 Heap::kReduceMemoryFootprintMask,
834 kGCCallbackFlagForced) &&
835 ((attempt + 1) >= kMinNumberOfAttempts)) {
834 break; 836 break;
835 } 837 }
836 } 838 }
837 set_current_gc_flags(kNoGCFlags);
838 new_space_.Shrink(); 839 new_space_.Shrink();
839 UncommitFromSpace(); 840 UncommitFromSpace();
840 } 841 }
841 842
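
The retry logic in CollectAllAvailableGarbage above (at least kMinNumberOfAttempts full GCs, at most kMaxNumberOfAttempts, stopping once a collection reports that another GC is unlikely to free more) is easy to misread in diff form. Here is a small stand-alone sketch of the same control flow; CollectOnce() is a hypothetical stand-in for one forced mark-compact cycle.

#include <cstdio>

// Hypothetical stand-in for one forced mark-compact cycle; returns true if
// the next GC is likely to collect more (e.g. weak callbacks freed objects).
bool CollectOnce(int attempt) {
  std::printf("GC attempt %d\n", attempt);
  return attempt < 2;  // pretend the third collection frees nothing further
}

void CollectAllAvailable() {
  const int kMaxNumberOfAttempts = 7;
  const int kMinNumberOfAttempts = 2;
  for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
    // Stop only when a GC made no further progress AND the minimum number of
    // collections has already been performed.
    if (!CollectOnce(attempt) && (attempt + 1) >= kMinNumberOfAttempts) {
      break;
    }
  }
}

int main() { CollectAllAvailable(); }
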
842 843
843 void Heap::EnsureFillerObjectAtTop() { 844 void Heap::EnsureFillerObjectAtTop() {
844 // There may be an allocation memento behind every object in new space. 845 // There may be an allocation memento behind every object in new space.
845 // If we evacuate a not full new space or if we are on the last page of 846 // If we evacuate a not full new space or if we are on the last page of
846 // the new space, then there may be uninitialized memory behind the top 847 // the new space, then there may be uninitialized memory behind the top
847 // pointer of the new space page. We store a filler object there to 848 // pointer of the new space page. We store a filler object there to
848 // identify the unused space. 849 // identify the unused space.
849 Address from_top = new_space_.top(); 850 Address from_top = new_space_.top();
850 // Check that from_top is inside its page (i.e., not at the end). 851 // Check that from_top is inside its page (i.e., not at the end).
851 Address space_end = new_space_.ToSpaceEnd(); 852 Address space_end = new_space_.ToSpaceEnd();
852 if (from_top < space_end) { 853 if (from_top < space_end) {
853 Page* page = Page::FromAddress(from_top); 854 Page* page = Page::FromAddress(from_top);
854 if (page->Contains(from_top)) { 855 if (page->Contains(from_top)) {
855 int remaining_in_page = static_cast<int>(page->area_end() - from_top); 856 int remaining_in_page = static_cast<int>(page->area_end() - from_top);
856 CreateFillerObjectAt(from_top, remaining_in_page); 857 CreateFillerObjectAt(from_top, remaining_in_page);
857 } 858 }
858 } 859 }
859 } 860 }
860 861
861 862
862 bool Heap::CollectGarbage(GarbageCollector collector, const char* gc_reason, 863 bool Heap::CollectGarbage(GarbageCollector collector, const char* gc_reason,
863 const char* collector_reason, 864 const char* collector_reason) {
864 const v8::GCCallbackFlags gc_callback_flags) {
865 // The VM is in the GC state until exiting this function. 865 // The VM is in the GC state until exiting this function.
866 VMState<GC> state(isolate_); 866 VMState<GC> state(isolate_);
867 867
868 #ifdef DEBUG 868 #ifdef DEBUG
869 // Reset the allocation timeout to the GC interval, but make sure to 869 // Reset the allocation timeout to the GC interval, but make sure to
870 // allow at least a few allocations after a collection. The reason 870 // allow at least a few allocations after a collection. The reason
871 // for this is that we have a lot of allocation sequences and we 871 // for this is that we have a lot of allocation sequences and we
872 // assume that a garbage collection will allow the subsequent 872 // assume that a garbage collection will allow the subsequent
873 // allocation attempts to go through. 873 // allocation attempts to go through.
874 allocation_timeout_ = Max(6, FLAG_gc_interval); 874 allocation_timeout_ = Max(6, FLAG_gc_interval);
(...skipping 34 matching lines...)
909 { 909 {
910 tracer()->Start(collector, gc_reason, collector_reason); 910 tracer()->Start(collector, gc_reason, collector_reason);
911 DCHECK(AllowHeapAllocation::IsAllowed()); 911 DCHECK(AllowHeapAllocation::IsAllowed());
912 DisallowHeapAllocation no_allocation_during_gc; 912 DisallowHeapAllocation no_allocation_during_gc;
913 GarbageCollectionPrologue(); 913 GarbageCollectionPrologue();
914 914
915 { 915 {
916 HistogramTimerScope histogram_timer_scope( 916 HistogramTimerScope histogram_timer_scope(
917 (collector == SCAVENGER) ? isolate_->counters()->gc_scavenger() 917 (collector == SCAVENGER) ? isolate_->counters()->gc_scavenger()
918 : isolate_->counters()->gc_compactor()); 918 : isolate_->counters()->gc_compactor());
919 next_gc_likely_to_collect_more = 919 next_gc_likely_to_collect_more = PerformGarbageCollection(collector);
920 PerformGarbageCollection(collector, gc_callback_flags);
921 } 920 }
922 921
923 GarbageCollectionEpilogue(); 922 GarbageCollectionEpilogue();
924 if (collector == MARK_COMPACTOR && FLAG_track_detached_contexts) { 923 if (collector == MARK_COMPACTOR && FLAG_track_detached_contexts) {
925 isolate()->CheckDetachedContextsAfterGC(); 924 isolate()->CheckDetachedContextsAfterGC();
926 } 925 }
927 926
928 if (collector == MARK_COMPACTOR) { 927 if (collector == MARK_COMPACTOR) {
929 intptr_t committed_memory_after = CommittedOldGenerationMemory(); 928 intptr_t committed_memory_after = CommittedOldGenerationMemory();
930 intptr_t used_memory_after = PromotedSpaceSizeOfObjects(); 929 intptr_t used_memory_after = PromotedSpaceSizeOfObjects();
(...skipping 10 matching lines...)
941 (detached_contexts()->length() > 0); 940 (detached_contexts()->length() > 0);
942 if (deserialization_complete_) { 941 if (deserialization_complete_) {
943 memory_reducer_->NotifyMarkCompact(event); 942 memory_reducer_->NotifyMarkCompact(event);
944 } 943 }
945 } 944 }
946 945
947 tracer()->Stop(collector); 946 tracer()->Stop(collector);
948 } 947 }
949 948
950 if (collector == MARK_COMPACTOR && 949 if (collector == MARK_COMPACTOR &&
951 (gc_callback_flags & kGCCallbackFlagForced) != 0) { 950 (current_gc_callback_flags_ & kGCCallbackFlagForced) != 0) {
952 isolate()->CountUsage(v8::Isolate::kForcedGC); 951 isolate()->CountUsage(v8::Isolate::kForcedGC);
953 } 952 }
954 953
955 // Start incremental marking for the next cycle. The heap snapshot 954 // Start incremental marking for the next cycle. The heap snapshot
956 // generator needs incremental marking to stay off after it aborted. 955 // generator needs incremental marking to stay off after it aborted.
957 if (!ShouldAbortIncrementalMarking() && incremental_marking()->IsStopped() && 956 if (!ShouldAbortIncrementalMarking() && incremental_marking()->IsStopped() &&
958 incremental_marking()->ShouldActivateEvenWithoutIdleNotification()) { 957 incremental_marking()->ShouldActivateEvenWithoutIdleNotification()) {
959 StartIncrementalMarking(kNoGCFlags, kNoGCCallbackFlags, "GC epilogue"); 958 StartIncrementalMarking(kNoGCFlags, kNoGCCallbackFlags, "GC epilogue");
960 } 959 }
961 960
(...skipping 14 matching lines...)
976 set_retained_maps(ArrayList::cast(empty_fixed_array())); 975 set_retained_maps(ArrayList::cast(empty_fixed_array()));
977 tracer()->AddContextDisposalTime(base::OS::TimeCurrentMillis()); 976 tracer()->AddContextDisposalTime(base::OS::TimeCurrentMillis());
978 MemoryReducer::Event event; 977 MemoryReducer::Event event;
979 event.type = MemoryReducer::kContextDisposed; 978 event.type = MemoryReducer::kContextDisposed;
980 event.time_ms = MonotonicallyIncreasingTimeInMs(); 979 event.time_ms = MonotonicallyIncreasingTimeInMs();
981 memory_reducer_->NotifyContextDisposed(event); 980 memory_reducer_->NotifyContextDisposed(event);
982 return ++contexts_disposed_; 981 return ++contexts_disposed_;
983 } 982 }
984 983
985 984
986 void Heap::StartIncrementalMarking(int gc_flags, 985 void Heap::StartIncrementalMarking(const GCFlags gc_flags,
987 const GCCallbackFlags gc_callback_flags, 986 const GCCallbackFlags gc_callback_flags,
988 const char* reason) { 987 const char* reason) {
989 DCHECK(incremental_marking()->IsStopped()); 988 DCHECK(incremental_marking()->IsStopped());
990 set_current_gc_flags(gc_flags); 989 set_current_gc_flags(gc_flags);
991 current_gc_callback_flags_ = gc_callback_flags; 990 current_gc_callback_flags_ = gc_callback_flags;
992 incremental_marking()->Start(reason); 991 incremental_marking()->Start(reason);
993 } 992 }
994 993
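
Sketch of the flag flow this patch leans on: StartIncrementalMarking records the flags in current_gc_flags_ (and the callback flags in current_gc_callback_flags_), and later finalization paths such as HandleGCRequest and the idle-notification finalizer reuse the stored values instead of having them threaded through every call. The types and bodies below are simplified stand-ins, not the real Heap interface.

#include <cstdio>

using GCFlags = unsigned;
constexpr GCFlags kNoGCFlags = 0;

struct HeapSketch {
  GCFlags current_gc_flags_ = kNoGCFlags;

  void StartIncrementalMarking(GCFlags gc_flags, const char* reason) {
    current_gc_flags_ = gc_flags;  // remembered for the rest of the GC cycle
    std::printf("incremental marking started: %s\n", reason);
  }

  void FinalizeWithStoredFlags(const char* reason) {
    // The stored flags are handed to the full GC that completes the cycle,
    // mirroring CollectAllGarbage(reason, current_gc_flags_, ...).
    CollectAllGarbage(reason, current_gc_flags_);
  }

  void CollectAllGarbage(const char* reason, GCFlags flags) {
    std::printf("full GC: %s (flags=0x%x)\n", reason, flags);
  }
};

int main() {
  HeapSketch heap;
  heap.StartIncrementalMarking(kNoGCFlags, "idle notification");
  heap.FinalizeWithStoredFlags("GC interrupt");
}
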
995 994
996 void Heap::StartIdleIncrementalMarking() { 995 void Heap::StartIdleIncrementalMarking() {
(...skipping 81 matching lines...)
1078 chunk.start = free_space_address; 1077 chunk.start = free_space_address;
1079 chunk.end = free_space_address + size; 1078 chunk.end = free_space_address + size;
1080 } else { 1079 } else {
1081 perform_gc = true; 1080 perform_gc = true;
1082 break; 1081 break;
1083 } 1082 }
1084 } 1083 }
1085 } 1084 }
1086 if (perform_gc) { 1085 if (perform_gc) {
1087 if (space == NEW_SPACE) { 1086 if (space == NEW_SPACE) {
1088 CollectGarbage(NEW_SPACE, "failed to reserve space in the new space"); 1087 CollectGarbageNewSpace("failed to reserve space in the new space");
1089 } else { 1088 } else {
1090 if (counter > 1) { 1089 if (counter > 1) {
1091 CollectAllGarbage( 1090 CollectAllGarbage(
1092 kReduceMemoryFootprintMask | kAbortIncrementalMarkingMask,
1093 "failed to reserve space in paged or large " 1091 "failed to reserve space in paged or large "
1094 "object space, trying to reduce memory footprint"); 1092 "object space, trying to reduce memory footprint",
1093 kReduceMemoryFootprintMask | kAbortIncrementalMarkingMask);
1095 } else { 1094 } else {
1096 CollectAllGarbage( 1095 CollectAllGarbage(
1097 kAbortIncrementalMarkingMask, 1096 "failed to reserve space in paged or large object space",
1098 "failed to reserve space in paged or large object space"); 1097 kAbortIncrementalMarkingMask);
1099 } 1098 }
1100 } 1099 }
1101 gc_performed = true; 1100 gc_performed = true;
1102 break; // Abort for-loop over spaces and retry. 1101 break; // Abort for-loop over spaces and retry.
1103 } 1102 }
1104 } 1103 }
1105 } 1104 }
1106 1105
1107 return !gc_performed; 1106 return !gc_performed;
1108 } 1107 }
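
The call sites above switch from CollectGarbage(NEW_SPACE, ...) to CollectGarbageNewSpace(...). The helper's definition is not in this file's diff, so the wrapper below is only an assumed shape, shown as a self-contained toy to illustrate the intent (naming the scavenge explicitly at call sites):

#include <cstdio>

// Everything here is a simplified stand-in, not the real Heap interface.
enum AllocationSpace { NEW_SPACE, OLD_SPACE };

struct HeapSketch {
  bool CollectGarbage(AllocationSpace space, const char* gc_reason) {
    std::printf("GC in space %d: %s\n", static_cast<int>(space), gc_reason);
    return true;
  }

  // Assumed shape of the new convenience wrapper used by the call sites.
  bool CollectGarbageNewSpace(const char* gc_reason) {
    return CollectGarbage(NEW_SPACE, gc_reason);
  }
};

int main() {
  HeapSketch heap;
  heap.CollectGarbageNewSpace("failed to reserve space in the new space");
}
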
(...skipping 48 matching lines...)
1157 1156
1158 double survival_rate = promotion_ratio_ + semi_space_copied_rate_; 1157 double survival_rate = promotion_ratio_ + semi_space_copied_rate_;
1159 tracer()->AddSurvivalRatio(survival_rate); 1158 tracer()->AddSurvivalRatio(survival_rate);
1160 if (survival_rate > kYoungSurvivalRateHighThreshold) { 1159 if (survival_rate > kYoungSurvivalRateHighThreshold) {
1161 high_survival_rate_period_length_++; 1160 high_survival_rate_period_length_++;
1162 } else { 1161 } else {
1163 high_survival_rate_period_length_ = 0; 1162 high_survival_rate_period_length_ = 0;
1164 } 1163 }
1165 } 1164 }
1166 1165
1167 bool Heap::PerformGarbageCollection( 1166
1168 GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags) { 1167 bool Heap::PerformGarbageCollection(GarbageCollector collector) {
1169 int freed_global_handles = 0; 1168 int freed_global_handles = 0;
1170 1169
1171 if (collector != SCAVENGER) { 1170 if (collector != SCAVENGER) {
1172 PROFILE(isolate_, CodeMovingGCEvent()); 1171 PROFILE(isolate_, CodeMovingGCEvent());
1173 } 1172 }
1174 1173
1175 #ifdef VERIFY_HEAP 1174 #ifdef VERIFY_HEAP
1176 if (FLAG_verify_heap) { 1175 if (FLAG_verify_heap) {
1177 VerifyStringTable(this); 1176 VerifyStringTable(this);
1178 } 1177 }
(...skipping 56 matching lines...)
1235 mark_compact_collector_.EnsureMarkingDequeIsCommitted( 1234 mark_compact_collector_.EnsureMarkingDequeIsCommitted(
1236 MarkCompactCollector::kMinMarkingDequeSize); 1235 MarkCompactCollector::kMinMarkingDequeSize);
1237 } 1236 }
1238 1237
1239 gc_post_processing_depth_++; 1238 gc_post_processing_depth_++;
1240 { 1239 {
1241 AllowHeapAllocation allow_allocation; 1240 AllowHeapAllocation allow_allocation;
1242 GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL); 1241 GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
1243 freed_global_handles = 1242 freed_global_handles =
1244 isolate_->global_handles()->PostGarbageCollectionProcessing( 1243 isolate_->global_handles()->PostGarbageCollectionProcessing(
1245 collector, gc_callback_flags); 1244 collector, current_gc_callback_flags_);
1246 } 1245 }
1247 gc_post_processing_depth_--; 1246 gc_post_processing_depth_--;
1248 1247
1249 isolate_->eternal_handles()->PostGarbageCollectionProcessing(this); 1248 isolate_->eternal_handles()->PostGarbageCollectionProcessing(this);
1250 1249
1251 // Update relocatables. 1250 // Update relocatables.
1252 Relocatable::PostGarbageCollectionProcessing(isolate_); 1251 Relocatable::PostGarbageCollectionProcessing(isolate_);
1253 1252
1254 double gc_speed = tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond(); 1253 double gc_speed = tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond();
1255 double mutator_speed = static_cast<double>( 1254 double mutator_speed = static_cast<double>(
(...skipping 10 matching lines...)
1266 DampenOldGenerationAllocationLimit(old_gen_size, gc_speed, mutator_speed); 1265 DampenOldGenerationAllocationLimit(old_gen_size, gc_speed, mutator_speed);
1267 } 1266 }
1268 1267
1269 { 1268 {
1270 GCCallbacksScope scope(this); 1269 GCCallbacksScope scope(this);
1271 if (scope.CheckReenter()) { 1270 if (scope.CheckReenter()) {
1272 AllowHeapAllocation allow_allocation; 1271 AllowHeapAllocation allow_allocation;
1273 GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL); 1272 GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
1274 VMState<EXTERNAL> state(isolate_); 1273 VMState<EXTERNAL> state(isolate_);
1275 HandleScope handle_scope(isolate_); 1274 HandleScope handle_scope(isolate_);
1276 CallGCEpilogueCallbacks(gc_type, gc_callback_flags); 1275 CallGCEpilogueCallbacks(gc_type, current_gc_callback_flags_);
1277 } 1276 }
1278 } 1277 }
1279 1278
1280 #ifdef VERIFY_HEAP 1279 #ifdef VERIFY_HEAP
1281 if (FLAG_verify_heap) { 1280 if (FLAG_verify_heap) {
1282 VerifyStringTable(this); 1281 VerifyStringTable(this);
1283 } 1282 }
1284 #endif 1283 #endif
1285 1284
1286 return freed_global_handles > 0; 1285 return freed_global_handles > 0;
(...skipping 3243 matching lines...)
4530 bool Heap::IsHeapIterable() { 4529 bool Heap::IsHeapIterable() {
4531 // TODO(hpayer): This function is not correct. Allocation folding in old 4530 // TODO(hpayer): This function is not correct. Allocation folding in old
4532 // space breaks the iterability. 4531 // space breaks the iterability.
4533 return new_space_top_after_last_gc_ == new_space()->top(); 4532 return new_space_top_after_last_gc_ == new_space()->top();
4534 } 4533 }
4535 4534
4536 4535
4537 void Heap::MakeHeapIterable() { 4536 void Heap::MakeHeapIterable() {
4538 DCHECK(AllowHeapAllocation::IsAllowed()); 4537 DCHECK(AllowHeapAllocation::IsAllowed());
4539 if (!IsHeapIterable()) { 4538 if (!IsHeapIterable()) {
4540 CollectAllGarbage(kMakeHeapIterableMask, "Heap::MakeHeapIterable"); 4539 CollectAllGarbage("Heap::MakeHeapIterable", kMakeHeapIterableMask);
4541 } 4540 }
4542 if (mark_compact_collector()->sweeping_in_progress()) { 4541 if (mark_compact_collector()->sweeping_in_progress()) {
4543 mark_compact_collector()->EnsureSweepingCompleted(); 4542 mark_compact_collector()->EnsureSweepingCompleted();
4544 } 4543 }
4545 DCHECK(IsHeapIterable()); 4544 DCHECK(IsHeapIterable());
4546 } 4545 }
4547 4546
4548 4547
4549 static double ComputeMutatorUtilization(double mutator_speed, double gc_speed) { 4548 static double ComputeMutatorUtilization(double mutator_speed, double gc_speed) {
4550 const double kMinMutatorUtilization = 0.0; 4549 const double kMinMutatorUtilization = 0.0;
(...skipping 104 matching lines...)
4655 gc_idle_time_handler_.ShouldDoOverApproximateWeakClosure( 4654 gc_idle_time_handler_.ShouldDoOverApproximateWeakClosure(
4656 static_cast<size_t>(idle_time_in_ms))))) { 4655 static_cast<size_t>(idle_time_in_ms))))) {
4657 OverApproximateWeakClosure( 4656 OverApproximateWeakClosure(
4658 "Idle notification: overapproximate weak closure"); 4657 "Idle notification: overapproximate weak closure");
4659 return true; 4658 return true;
4660 } else if (incremental_marking()->IsComplete() || 4659 } else if (incremental_marking()->IsComplete() ||
4661 (mark_compact_collector_.marking_deque()->IsEmpty() && 4660 (mark_compact_collector_.marking_deque()->IsEmpty() &&
4662 gc_idle_time_handler_.ShouldDoFinalIncrementalMarkCompact( 4661 gc_idle_time_handler_.ShouldDoFinalIncrementalMarkCompact(
4663 static_cast<size_t>(idle_time_in_ms), size_of_objects, 4662 static_cast<size_t>(idle_time_in_ms), size_of_objects,
4664 final_incremental_mark_compact_speed_in_bytes_per_ms))) { 4663 final_incremental_mark_compact_speed_in_bytes_per_ms))) {
4665 CollectAllGarbage(current_gc_flags_, 4664 CollectAllGarbage("idle notification: finalize incremental",
4666 "idle notification: finalize incremental"); 4665 current_gc_flags_);
4667 return true; 4666 return true;
4668 } 4667 }
4669 return false; 4668 return false;
4670 } 4669 }
4671 4670
4672 4671
4673 GCIdleTimeHandler::HeapState Heap::ComputeHeapState() { 4672 GCIdleTimeHandler::HeapState Heap::ComputeHeapState() {
4674 GCIdleTimeHandler::HeapState heap_state; 4673 GCIdleTimeHandler::HeapState heap_state;
4675 heap_state.contexts_disposed = contexts_disposed_; 4674 heap_state.contexts_disposed = contexts_disposed_;
4676 heap_state.contexts_disposal_rate = 4675 heap_state.contexts_disposal_rate =
(...skipping 62 matching lines...)
4739 if (remaining_idle_time_in_ms > 0.0) { 4738 if (remaining_idle_time_in_ms > 0.0) {
4740 action.additional_work = TryFinalizeIdleIncrementalMarking( 4739 action.additional_work = TryFinalizeIdleIncrementalMarking(
4741 remaining_idle_time_in_ms, heap_state.size_of_objects, 4740 remaining_idle_time_in_ms, heap_state.size_of_objects,
4742 heap_state.final_incremental_mark_compact_speed_in_bytes_per_ms); 4741 heap_state.final_incremental_mark_compact_speed_in_bytes_per_ms);
4743 } 4742 }
4744 break; 4743 break;
4745 } 4744 }
4746 case DO_FULL_GC: { 4745 case DO_FULL_GC: {
4747 DCHECK(contexts_disposed_ > 0); 4746 DCHECK(contexts_disposed_ > 0);
4748 HistogramTimerScope scope(isolate_->counters()->gc_context()); 4747 HistogramTimerScope scope(isolate_->counters()->gc_context());
4749 CollectAllGarbage(kNoGCFlags, "idle notification: contexts disposed"); 4748 CollectAllGarbage("idle notification: contexts disposed", kNoGCFlags);
4750 break; 4749 break;
4751 } 4750 }
4752 case DO_SCAVENGE: 4751 case DO_SCAVENGE:
4753 CollectGarbage(NEW_SPACE, "idle notification: scavenge"); 4752 CollectGarbageNewSpace("idle notification: scavenge");
4754 break; 4753 break;
4755 case DO_FINALIZE_SWEEPING: 4754 case DO_FINALIZE_SWEEPING:
4756 mark_compact_collector()->EnsureSweepingCompleted(); 4755 mark_compact_collector()->EnsureSweepingCompleted();
4757 break; 4756 break;
4758 case DO_NOTHING: 4757 case DO_NOTHING:
4759 break; 4758 break;
4760 } 4759 }
4761 4760
4762 return result; 4761 return result;
4763 } 4762 }
(...skipping 2010 matching lines...)
6774 *object_type = "CODE_TYPE"; \ 6773 *object_type = "CODE_TYPE"; \
6775 *object_sub_type = "CODE_AGE/" #name; \ 6774 *object_sub_type = "CODE_AGE/" #name; \
6776 return true; 6775 return true;
6777 CODE_AGE_LIST_COMPLETE(COMPARE_AND_RETURN_NAME) 6776 CODE_AGE_LIST_COMPLETE(COMPARE_AND_RETURN_NAME)
6778 #undef COMPARE_AND_RETURN_NAME 6777 #undef COMPARE_AND_RETURN_NAME
6779 } 6778 }
6780 return false; 6779 return false;
6781 } 6780 }
6782 } // namespace internal 6781 } // namespace internal
6783 } // namespace v8 6782 } // namespace v8
