Chromium Code Reviews

Unified Diff: src/heap/heap.cc

Issue 1303393004: Revert of [heap] More flag cleanup. (Closed)
Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Created 5 years, 3 months ago
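Note on the change being reverted: the original CL had replaced V8's untyped `int` GC-flag parameters with a typed `Heap::GCFlags` (see the removed `DEFINE_OPERATORS_FOR_FLAGS(Heap::GCFlags)` line below); this revert restores the `int`-based signatures and the flags-first argument order. A minimal sketch, assuming nothing about V8's actual macro, of the kind of type-safe flag wrapper such a macro enables — illustrative only, with invented enumerator values:

#include <cstdint>

// Illustrative enum only; V8's real GC flags live on Heap and their
// values are not shown in this diff.
enum class GCFlag : uint32_t {
  kNoGCFlags = 0,
  kReduceMemoryFootprint = 1u << 0,
  kAbortIncrementalMarking = 1u << 1,
};

// With a typed operator| like this, combining flags stays inside the
// enum type, so passing an unrelated integer to a GC entry point fails
// to compile. Reverting to plain `int flags` (the `+` lines below)
// trades that compile-time check away.
constexpr GCFlag operator|(GCFlag a, GCFlag b) {
  return static_cast<GCFlag>(static_cast<uint32_t>(a) |
                             static_cast<uint32_t>(b));
}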
--- a/src/heap/heap.cc
+++ b/src/heap/heap.cc
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "src/heap/heap.h"

 #include "src/accessors.h"
 #include "src/api.h"
 #include "src/base/bits.h"
 #include "src/base/once.h"
(...skipping 32 matching lines...)
 namespace internal {


 struct Heap::StrongRootsList {
   Object** start;
   Object** end;
   StrongRootsList* next;
 };


-DEFINE_OPERATORS_FOR_FLAGS(Heap::GCFlags)
-
-
 Heap::Heap()
     : amount_of_external_allocated_memory_(0),
       amount_of_external_allocated_memory_at_last_global_gc_(0),
       isolate_(NULL),
       code_range_size_(0),
       // semispace_size_ should be a power of 2 and old_generation_size_ should
       // be a multiple of Page::kPageSize.
       reserved_semispace_size_(8 * (kPointerSize / 4) * MB),
       max_semi_space_size_(8 * (kPointerSize / 4) * MB),
       initial_semispace_size_(Page::kPageSize),
(...skipping 63 matching lines...)
       crankshaft_codegen_bytes_generated_(0),
       new_space_allocation_counter_(0),
       old_generation_allocation_counter_(0),
       old_generation_size_at_last_gc_(0),
       gcs_since_last_deopt_(0),
       allocation_sites_scratchpad_length_(0),
       ring_buffer_full_(false),
       ring_buffer_end_(0),
       promotion_queue_(this),
       configured_(false),
-      current_gc_flags_(kNoGCFlags),
+      current_gc_flags_(Heap::kNoGCFlags),
       current_gc_callback_flags_(GCCallbackFlags::kNoGCCallbackFlags),
       external_string_table_(this),
       chunks_queued_for_free_(NULL),
       concurrent_unmapping_tasks_active_(0),
       pending_unmapping_tasks_semaphore_(0),
       gc_callbacks_depth_(0),
       deserialization_complete_(false),
       concurrent_sweeping_enabled_(false),
       strong_roots_list_(NULL) {
   // Allow build-time customization of the max semispace size. Building
(...skipping 596 matching lines...)
   }
   // We must not compact the weak fixed list here, as we may be in the middle
   // of writing to it, when the GC triggered. Instead, we reset the root value.
   set_weak_stack_trace_list(Smi::FromInt(0));
 }


 void Heap::HandleGCRequest() {
   if (incremental_marking()->request_type() ==
       IncrementalMarking::COMPLETE_MARKING) {
-    CollectAllGarbage("GC interrupt", current_gc_flags_,
+    CollectAllGarbage(current_gc_flags_, "GC interrupt",
                       current_gc_callback_flags_);
     return;
   }
   DCHECK(FLAG_overapproximate_weak_closure);
   if (!incremental_marking()->weak_closure_was_overapproximated()) {
     OverApproximateWeakClosure("GC interrupt");
   }
 }


(...skipping 23 matching lines...)
       AllowHeapAllocation allow_allocation;
       GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
       VMState<EXTERNAL> state(isolate_);
       HandleScope handle_scope(isolate_);
       CallGCEpilogueCallbacks(kGCTypeIncrementalMarking, kNoGCCallbackFlags);
     }
   }
 }


-void Heap::CollectAllGarbage(const char* gc_reason, const GCFlags flags,
+void Heap::CollectAllGarbage(int flags, const char* gc_reason,
                              const v8::GCCallbackFlags gc_callback_flags) {
   // Since we are ignoring the return value, the exact choice of space does
   // not matter, so long as we do not specify NEW_SPACE, which would not
   // cause a full GC.
-  CollectGarbage(OLD_SPACE, gc_reason, flags, gc_callback_flags);
+  set_current_gc_flags(flags);
+  CollectGarbage(OLD_SPACE, gc_reason, gc_callback_flags);
+  set_current_gc_flags(kNoGCFlags);
 }


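Note: the reverted CollectAllGarbage stashes the flags in the heap-wide `current_gc_flags_` around the collection instead of threading them through the call chain. A hypothetical illustration, not code from this CL, of how that set/collect/reset sequence could be made robust against early returns with a scoped guard (assumes the Heap declarations from src/heap/heap.h):

// Hypothetical RAII guard; `set_current_gc_flags` and `Heap::kNoGCFlags`
// appear in the diff, the guard itself does not.
class ScopedGCFlags {
 public:
  ScopedGCFlags(Heap* heap, int flags) : heap_(heap) {
    heap_->set_current_gc_flags(flags);  // install flags for this collection
  }
  ~ScopedGCFlags() { heap_->set_current_gc_flags(Heap::kNoGCFlags); }  // always reset

 private:
  Heap* heap_;
};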
 void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
   // Since we are ignoring the return value, the exact choice of space does
   // not matter, so long as we do not specify NEW_SPACE, which would not
   // cause a full GC.
   // Major GC would invoke weak handle callbacks on weakly reachable
   // handles, but won't collect weakly reachable objects until next
   // major GC. Therefore if we collect aggressively and weak handle callback
   // has been invoked, we rerun major GC to release objects which become
   // garbage.
   // Note: as weak callbacks can execute arbitrary code, we cannot
   // hope that eventually there will be no weak callbacks invocations.
   // Therefore stop recollecting after several attempts.
   if (isolate()->concurrent_recompilation_enabled()) {
     // The optimizing compiler may be unnecessarily holding on to memory.
     DisallowHeapAllocation no_recursive_gc;
     isolate()->optimizing_compile_dispatcher()->Flush();
   }
   isolate()->ClearSerializerData();
-  isolate()->compilation_cache()->Clear();
+  set_current_gc_flags(kMakeHeapIterableMask | kReduceMemoryFootprintMask);
+  isolate_->compilation_cache()->Clear();
   const int kMaxNumberOfAttempts = 7;
   const int kMinNumberOfAttempts = 2;
   for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
-    if (!CollectGarbage(OLD_SPACE, gc_reason,
-                        Heap::kAbortIncrementalMarkingMask |
-                            Heap::kReduceMemoryFootprintMask,
-                        kGCCallbackFlagForced) &&
-        ((attempt + 1) >= kMinNumberOfAttempts)) {
+    if (!CollectGarbage(MARK_COMPACTOR, gc_reason, NULL,
+                        v8::kGCCallbackFlagForced) &&
+        attempt + 1 >= kMinNumberOfAttempts) {
       break;
     }
   }
+  set_current_gc_flags(kNoGCFlags);
   new_space_.Shrink();
   UncommitFromSpace();
 }


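Note: the loop bounds encode the policy from the comment block above — weak callbacks can create fresh garbage, so full GCs are retried while they keep making progress, with a floor of two attempts and a cap of seven. A standalone sketch of just that policy, where the hypothetical `do_full_gc` stands in for the `CollectGarbage` call and reports whether another round is likely to collect more:

#include <functional>

// Retry policy only; constants as in the diff, callback invented.
void CollectUntilNoProgress(const std::function<bool()>& do_full_gc) {
  const int kMaxNumberOfAttempts = 7;
  const int kMinNumberOfAttempts = 2;
  for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
    bool likely_to_collect_more = do_full_gc();
    // Stop once a GC frees nothing, but only after the minimum attempts.
    if (!likely_to_collect_more && attempt + 1 >= kMinNumberOfAttempts) break;
  }
}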
 void Heap::EnsureFillerObjectAtTop() {
   // There may be an allocation memento behind every object in new space.
   // If we evacuate a not full new space or if we are on the last page of
   // the new space, then there may be uninitialized memory behind the top
   // pointer of the new space page. We store a filler object there to
   // identify the unused space.
   Address from_top = new_space_.top();
   // Check that from_top is inside its page (i.e., not at the end).
   Address space_end = new_space_.ToSpaceEnd();
   if (from_top < space_end) {
     Page* page = Page::FromAddress(from_top);
     if (page->Contains(from_top)) {
       int remaining_in_page = static_cast<int>(page->area_end() - from_top);
       CreateFillerObjectAt(from_top, remaining_in_page);
     }
   }
 }


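Note: a worked example of the filler arithmetic above, with invented addresses — if the page's usable area ends at 0x2f000 and the new-space top sits at 0x2e540, a filler of 0xac0 (2752) bytes marks the gap so heap iteration never walks into uninitialized memory:

#include <cassert>
#include <cstdint>

// Same computation as `page->area_end() - from_top` above, detached
// from the Page/Address types for illustration.
int RemainingInPage(uintptr_t area_end, uintptr_t top) {
  assert(top <= area_end);
  return static_cast<int>(area_end - top);
}

// RemainingInPage(0x2f000, 0x2e540) == 0xac0, i.e. 2752 bytes of filler.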
 bool Heap::CollectGarbage(GarbageCollector collector, const char* gc_reason,
-                          const char* collector_reason) {
+                          const char* collector_reason,
+                          const v8::GCCallbackFlags gc_callback_flags) {
   // The VM is in the GC state until exiting this function.
   VMState<GC> state(isolate_);

 #ifdef DEBUG
   // Reset the allocation timeout to the GC interval, but make sure to
   // allow at least a few allocations after a collection. The reason
   // for this is that we have a lot of allocation sequences and we
   // assume that a garbage collection will allow the subsequent
   // allocation attempts to go through.
   allocation_timeout_ = Max(6, FLAG_gc_interval);
(...skipping 34 matching lines...)
   {
     tracer()->Start(collector, gc_reason, collector_reason);
     DCHECK(AllowHeapAllocation::IsAllowed());
     DisallowHeapAllocation no_allocation_during_gc;
     GarbageCollectionPrologue();

     {
       HistogramTimerScope histogram_timer_scope(
           (collector == SCAVENGER) ? isolate_->counters()->gc_scavenger()
                                    : isolate_->counters()->gc_compactor());
-      next_gc_likely_to_collect_more = PerformGarbageCollection(collector);
+      next_gc_likely_to_collect_more =
+          PerformGarbageCollection(collector, gc_callback_flags);
     }

     GarbageCollectionEpilogue();
     if (collector == MARK_COMPACTOR && FLAG_track_detached_contexts) {
       isolate()->CheckDetachedContextsAfterGC();
     }

     if (collector == MARK_COMPACTOR) {
       intptr_t committed_memory_after = CommittedOldGenerationMemory();
       intptr_t used_memory_after = PromotedSpaceSizeOfObjects();
(...skipping 10 matching lines...)
           (detached_contexts()->length() > 0);
       if (deserialization_complete_) {
         memory_reducer_->NotifyMarkCompact(event);
       }
     }

     tracer()->Stop(collector);
   }

   if (collector == MARK_COMPACTOR &&
-      (current_gc_callback_flags_ & kGCCallbackFlagForced) != 0) {
+      (gc_callback_flags & kGCCallbackFlagForced) != 0) {
     isolate()->CountUsage(v8::Isolate::kForcedGC);
   }

   // Start incremental marking for the next cycle. The heap snapshot
   // generator needs incremental marking to stay off after it aborted.
   if (!ShouldAbortIncrementalMarking() && incremental_marking()->IsStopped() &&
       incremental_marking()->ShouldActivateEvenWithoutIdleNotification()) {
     StartIncrementalMarking(kNoGCFlags, kNoGCCallbackFlags, "GC epilogue");
   }

(...skipping 14 matching lines...)
   set_retained_maps(ArrayList::cast(empty_fixed_array()));
   tracer()->AddContextDisposalTime(base::OS::TimeCurrentMillis());
   MemoryReducer::Event event;
   event.type = MemoryReducer::kContextDisposed;
   event.time_ms = MonotonicallyIncreasingTimeInMs();
   memory_reducer_->NotifyContextDisposed(event);
   return ++contexts_disposed_;
 }


-void Heap::StartIncrementalMarking(const GCFlags gc_flags,
+void Heap::StartIncrementalMarking(int gc_flags,
                                    const GCCallbackFlags gc_callback_flags,
                                    const char* reason) {
   DCHECK(incremental_marking()->IsStopped());
   set_current_gc_flags(gc_flags);
   current_gc_callback_flags_ = gc_callback_flags;
   incremental_marking()->Start(reason);
 }


 void Heap::StartIdleIncrementalMarking() {
(...skipping 81 matching lines...)
           chunk.start = free_space_address;
           chunk.end = free_space_address + size;
         } else {
           perform_gc = true;
           break;
         }
       }
     }
     if (perform_gc) {
       if (space == NEW_SPACE) {
-        CollectGarbageNewSpace("failed to reserve space in the new space");
+        CollectGarbage(NEW_SPACE, "failed to reserve space in the new space");
       } else {
         if (counter > 1) {
           CollectAllGarbage(
+              kReduceMemoryFootprintMask | kAbortIncrementalMarkingMask,
               "failed to reserve space in paged or large "
-              "object space, trying to reduce memory footprint",
-              kReduceMemoryFootprintMask | kAbortIncrementalMarkingMask);
+              "object space, trying to reduce memory footprint");
         } else {
           CollectAllGarbage(
-              "failed to reserve space in paged or large object space",
-              kAbortIncrementalMarkingMask);
+              kAbortIncrementalMarkingMask,
+              "failed to reserve space in paged or large object space");
         }
       }
       gc_performed = true;
       break;  // Abort for-loop over spaces and retry.
     }
   }

   return !gc_performed;
 }
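Note: the branch above escalates by attempt count — the first failed reservation only aborts incremental marking, while repeat failures also ask the GC to shrink the footprint. A reduced sketch of that escalation, with illustrative flag values (the real masks are Heap constants whose values are not shown in this diff):

// Stand-in flag values for illustration only.
enum IllustrativeGCFlags : int {
  kAbortIncrementalMarking = 1 << 0,
  kReduceMemoryFootprint = 1 << 1,
};

// Mirrors the `counter > 1` escalation in the diff.
int FlagsForReserveAttempt(int counter) {
  return counter > 1 ? kReduceMemoryFootprint | kAbortIncrementalMarking
                     : kAbortIncrementalMarking;
}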
(...skipping 48 matching lines...)

   double survival_rate = promotion_ratio_ + semi_space_copied_rate_;
   tracer()->AddSurvivalRatio(survival_rate);
   if (survival_rate > kYoungSurvivalRateHighThreshold) {
     high_survival_rate_period_length_++;
   } else {
     high_survival_rate_period_length_ = 0;
   }
 }

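Note: a numeric illustration of the ratio above, with invented inputs (the actual units and the value of kYoungSurvivalRateHighThreshold are not shown in this diff) — if 40% of new-space bytes survived by promotion and 50% by being copied within the semispaces, the combined survival rate is 90%:

// Same sum as in the diff, detached from Heap state for illustration.
double SurvivalRate(double promotion_ratio, double semi_space_copied_rate) {
  return promotion_ratio + semi_space_copied_rate;  // e.g. 40.0 + 50.0 == 90.0
}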
-
-bool Heap::PerformGarbageCollection(GarbageCollector collector) {
+bool Heap::PerformGarbageCollection(
+    GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags) {
   int freed_global_handles = 0;

   if (collector != SCAVENGER) {
     PROFILE(isolate_, CodeMovingGCEvent());
   }

 #ifdef VERIFY_HEAP
   if (FLAG_verify_heap) {
     VerifyStringTable(this);
   }
(...skipping 56 matching lines...)
     mark_compact_collector_.EnsureMarkingDequeIsCommitted(
         MarkCompactCollector::kMinMarkingDequeSize);
   }

   gc_post_processing_depth_++;
   {
     AllowHeapAllocation allow_allocation;
     GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
     freed_global_handles =
         isolate_->global_handles()->PostGarbageCollectionProcessing(
-            collector, current_gc_callback_flags_);
+            collector, gc_callback_flags);
   }
   gc_post_processing_depth_--;

   isolate_->eternal_handles()->PostGarbageCollectionProcessing(this);

   // Update relocatables.
   Relocatable::PostGarbageCollectionProcessing(isolate_);

   double gc_speed = tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond();
   double mutator_speed = static_cast<double>(
(...skipping 10 matching lines...)
     DampenOldGenerationAllocationLimit(old_gen_size, gc_speed, mutator_speed);
   }

   {
     GCCallbacksScope scope(this);
     if (scope.CheckReenter()) {
       AllowHeapAllocation allow_allocation;
       GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
       VMState<EXTERNAL> state(isolate_);
       HandleScope handle_scope(isolate_);
-      CallGCEpilogueCallbacks(gc_type, current_gc_callback_flags_);
+      CallGCEpilogueCallbacks(gc_type, gc_callback_flags);
     }
   }

 #ifdef VERIFY_HEAP
   if (FLAG_verify_heap) {
     VerifyStringTable(this);
   }
 #endif

   return freed_global_handles > 0;
(...skipping 3243 matching lines...)
 bool Heap::IsHeapIterable() {
   // TODO(hpayer): This function is not correct. Allocation folding in old
   // space breaks the iterability.
   return new_space_top_after_last_gc_ == new_space()->top();
 }


 void Heap::MakeHeapIterable() {
   DCHECK(AllowHeapAllocation::IsAllowed());
   if (!IsHeapIterable()) {
-    CollectAllGarbage("Heap::MakeHeapIterable", kMakeHeapIterableMask);
+    CollectAllGarbage(kMakeHeapIterableMask, "Heap::MakeHeapIterable");
   }
   if (mark_compact_collector()->sweeping_in_progress()) {
     mark_compact_collector()->EnsureSweepingCompleted();
   }
   DCHECK(IsHeapIterable());
 }


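Note: a hedged caller-side sketch, not code from this CL — anything that wants to walk every object (a heap snapshot, for instance) would force iterability first; after MakeHeapIterable() returns, unused space is covered by fillers and sweeping has completed:

// Hypothetical caller; assumes the Heap declarations from src/heap/heap.h.
void WalkHeapExample(Heap* heap) {
  heap->MakeHeapIterable();  // full GC with kMakeHeapIterableMask if needed
  // ... iterate objects here; every word is a live object or a filler.
}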
 static double ComputeMutatorUtilization(double mutator_speed, double gc_speed) {
   const double kMinMutatorUtilization = 0.0;
(...skipping 104 matching lines...)
              gc_idle_time_handler_.ShouldDoOverApproximateWeakClosure(
                  static_cast<size_t>(idle_time_in_ms))))) {
     OverApproximateWeakClosure(
         "Idle notification: overapproximate weak closure");
     return true;
   } else if (incremental_marking()->IsComplete() ||
              (mark_compact_collector_.marking_deque()->IsEmpty() &&
               gc_idle_time_handler_.ShouldDoFinalIncrementalMarkCompact(
                   static_cast<size_t>(idle_time_in_ms), size_of_objects,
                   final_incremental_mark_compact_speed_in_bytes_per_ms))) {
-    CollectAllGarbage("idle notification: finalize incremental",
-                      current_gc_flags_);
+    CollectAllGarbage(current_gc_flags_,
+                      "idle notification: finalize incremental");
     return true;
   }
   return false;
 }


 GCIdleTimeHandler::HeapState Heap::ComputeHeapState() {
   GCIdleTimeHandler::HeapState heap_state;
   heap_state.contexts_disposed = contexts_disposed_;
   heap_state.contexts_disposal_rate =
(...skipping 62 matching lines...)
       if (remaining_idle_time_in_ms > 0.0) {
         action.additional_work = TryFinalizeIdleIncrementalMarking(
             remaining_idle_time_in_ms, heap_state.size_of_objects,
             heap_state.final_incremental_mark_compact_speed_in_bytes_per_ms);
       }
       break;
     }
     case DO_FULL_GC: {
       DCHECK(contexts_disposed_ > 0);
       HistogramTimerScope scope(isolate_->counters()->gc_context());
-      CollectAllGarbage("idle notification: contexts disposed", kNoGCFlags);
+      CollectAllGarbage(kNoGCFlags, "idle notification: contexts disposed");
       break;
     }
     case DO_SCAVENGE:
-      CollectGarbageNewSpace("idle notification: scavenge");
+      CollectGarbage(NEW_SPACE, "idle notification: scavenge");
       break;
     case DO_FINALIZE_SWEEPING:
       mark_compact_collector()->EnsureSweepingCompleted();
       break;
     case DO_NOTHING:
       break;
   }

   return result;
 }
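Note: a reduced model of the dispatch above (action names from the diff; handler bodies replaced by comments), showing the shape of the idle-notification state machine — one GC-related action per idle period, chosen by GCIdleTimeHandler:

enum IdleAction { DO_FULL_GC, DO_SCAVENGE, DO_FINALIZE_SWEEPING, DO_NOTHING };

void PerformIdleAction(IdleAction action) {
  switch (action) {
    case DO_FULL_GC:
      // CollectAllGarbage(kNoGCFlags, "idle notification: contexts disposed")
      break;
    case DO_SCAVENGE:
      // CollectGarbage(NEW_SPACE, "idle notification: scavenge")
      break;
    case DO_FINALIZE_SWEEPING:
      // mark_compact_collector()->EnsureSweepingCompleted()
      break;
    case DO_NOTHING:
      break;
  }
}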
(...skipping 2010 matching lines...)
     *object_type = "CODE_TYPE";           \
     *object_sub_type = "CODE_AGE/" #name; \
     return true;
     CODE_AGE_LIST_COMPLETE(COMPARE_AND_RETURN_NAME)
 #undef COMPARE_AND_RETURN_NAME
   }
   return false;
 }
 }  // namespace internal
 }  // namespace v8