Chromium Code Reviews

Side by Side Diff: src/heap/heap.cc

Issue 1301183002: [heap] Cleanup and fix GC flags / add testing infra (Closed)
Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Addressed Hannes' comments (created 5 years, 4 months ago)
OLD | NEW
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "src/heap/heap.h" 5 #include "src/heap/heap.h"
6 6
7 #include "src/accessors.h" 7 #include "src/accessors.h"
8 #include "src/api.h" 8 #include "src/api.h"
9 #include "src/base/bits.h" 9 #include "src/base/bits.h"
10 #include "src/base/once.h" 10 #include "src/base/once.h"
(...skipping 116 matching lines...)
127 crankshaft_codegen_bytes_generated_(0), 127 crankshaft_codegen_bytes_generated_(0),
128 new_space_allocation_counter_(0), 128 new_space_allocation_counter_(0),
129 old_generation_allocation_counter_(0), 129 old_generation_allocation_counter_(0),
130 old_generation_size_at_last_gc_(0), 130 old_generation_size_at_last_gc_(0),
131 gcs_since_last_deopt_(0), 131 gcs_since_last_deopt_(0),
132 allocation_sites_scratchpad_length_(0), 132 allocation_sites_scratchpad_length_(0),
133 ring_buffer_full_(false), 133 ring_buffer_full_(false),
134 ring_buffer_end_(0), 134 ring_buffer_end_(0),
135 promotion_queue_(this), 135 promotion_queue_(this),
136 configured_(false), 136 configured_(false),
137 current_gc_flags_(Heap::kNoGCFlags),
137 external_string_table_(this), 138 external_string_table_(this),
138 chunks_queued_for_free_(NULL), 139 chunks_queued_for_free_(NULL),
139 gc_callbacks_depth_(0), 140 gc_callbacks_depth_(0),
140 deserialization_complete_(false), 141 deserialization_complete_(false),
141 concurrent_sweeping_enabled_(false), 142 concurrent_sweeping_enabled_(false),
142 strong_roots_list_(NULL) { 143 strong_roots_list_(NULL) {
143 // Allow build-time customization of the max semispace size. Building 144 // Allow build-time customization of the max semispace size. Building
144 // V8 with snapshots and a non-default max semispace size is much 145 // V8 with snapshots and a non-default max semispace size is much
145 // easier if you can define it as part of the build environment. 146 // easier if you can define it as part of the build environment.
146 #if defined(V8_MAX_SEMISPACE_SIZE) 147 #if defined(V8_MAX_SEMISPACE_SIZE)
(...skipping 644 matching lines...)
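The build-time override described in the comment above is supplied on the compiler command line; a minimal illustration (the value is hypothetical, not a V8 default):

// When building V8, define the macro before heap.cc is compiled, e.g.
//   -DV8_MAX_SEMISPACE_SIZE=8388608   // 8 MB, illustrative value only
// The #if block above then seeds the semispace limits from this macro
// instead of computing them from the runtime defaults.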
791 } 792 }
792 } 793 }
793 } 794 }
794 795
795 796
796 void Heap::CollectAllGarbage(int flags, const char* gc_reason, 797 void Heap::CollectAllGarbage(int flags, const char* gc_reason,
797 const v8::GCCallbackFlags gc_callback_flags) { 798 const v8::GCCallbackFlags gc_callback_flags) {
798 // Since we are ignoring the return value, the exact choice of space does 799 // Since we are ignoring the return value, the exact choice of space does
799 // not matter, so long as we do not specify NEW_SPACE, which would not 800 // not matter, so long as we do not specify NEW_SPACE, which would not
800 // cause a full GC. 801 // cause a full GC.
801 mark_compact_collector_.SetFlags(flags); 802 set_current_gc_flags(flags);
802 CollectGarbage(OLD_SPACE, gc_reason, gc_callback_flags); 803 CollectGarbage(OLD_SPACE, gc_reason, gc_callback_flags);
803 mark_compact_collector_.SetFlags(kNoGCFlags); 804 set_current_gc_flags(kNoGCFlags);
804 } 805 }
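This hunk is the core of the cleanup: the GC flags move from the mark-compact collector onto the Heap itself, backed by the current_gc_flags_ field initialized in the constructor hunk above. A minimal sketch of the new storage and of the predicates used later in this diff; the flag bit values and the inline placement in heap.h are assumptions, not necessarily V8's actual constants:

// Sketch of the Heap-side GC flag plumbing this patch introduces.
// Predicate names match the call sites in this diff.
class Heap {
 public:
  static const int kNoGCFlags = 0;
  static const int kReduceMemoryFootprintMask = 1 << 0;
  static const int kAbortIncrementalMarkingMask = 1 << 1;
  static const int kFinalizeIncrementalMarkingMask = 1 << 2;
  static const int kMakeHeapIterableMask = 1 << 3;

  void set_current_gc_flags(int flags) { current_gc_flags_ = flags; }

  bool ShouldReduceMemory() const {
    return (current_gc_flags_ & kReduceMemoryFootprintMask) != 0;
  }
  bool ShouldAbortIncrementalMarking() const {
    return (current_gc_flags_ & kAbortIncrementalMarkingMask) != 0;
  }
  bool ShouldFinalizeIncrementalMarking() const {
    return (current_gc_flags_ & kFinalizeIncrementalMarkingMask) != 0;
  }

 private:
  int current_gc_flags_ = kNoGCFlags;
};

Setting the flags before a collection and resetting them to kNoGCFlags afterwards, as CollectAllGarbage does above, keeps the flags scoped to a single GC cycle rather than leaking into the next one.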
805 806
806 807
807 void Heap::CollectAllAvailableGarbage(const char* gc_reason) { 808 void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
808 // Since we are ignoring the return value, the exact choice of space does 809 // Since we are ignoring the return value, the exact choice of space does
809 // not matter, so long as we do not specify NEW_SPACE, which would not 810 // not matter, so long as we do not specify NEW_SPACE, which would not
810 // cause a full GC. 811 // cause a full GC.
811 // Major GC would invoke weak handle callbacks on weakly reachable 812 // Major GC would invoke weak handle callbacks on weakly reachable
812 // handles, but won't collect weakly reachable objects until next 813 // handles, but won't collect weakly reachable objects until next
813 // major GC. Therefore if we collect aggressively and weak handle callback 814 // major GC. Therefore if we collect aggressively and weak handle callback
814 // has been invoked, we rerun major GC to release objects which become 815 // has been invoked, we rerun major GC to release objects which become
815 // garbage. 816 // garbage.
816 // Note: as weak callbacks can execute arbitrary code, we cannot 817 // Note: as weak callbacks can execute arbitrary code, we cannot
817 // hope that eventually there will be no weak callbacks invocations. 818 // hope that eventually there will be no weak callbacks invocations.
818 // Therefore stop recollecting after several attempts. 819 // Therefore stop recollecting after several attempts.
819 if (isolate()->concurrent_recompilation_enabled()) { 820 if (isolate()->concurrent_recompilation_enabled()) {
820 // The optimizing compiler may be unnecessarily holding on to memory. 821 // The optimizing compiler may be unnecessarily holding on to memory.
821 DisallowHeapAllocation no_recursive_gc; 822 DisallowHeapAllocation no_recursive_gc;
822 isolate()->optimizing_compile_dispatcher()->Flush(); 823 isolate()->optimizing_compile_dispatcher()->Flush();
823 } 824 }
824 isolate()->ClearSerializerData(); 825 isolate()->ClearSerializerData();
825 mark_compact_collector()->SetFlags(kMakeHeapIterableMask | 826 set_current_gc_flags(kMakeHeapIterableMask | kReduceMemoryFootprintMask);
826 kReduceMemoryFootprintMask);
827 isolate_->compilation_cache()->Clear(); 827 isolate_->compilation_cache()->Clear();
828 const int kMaxNumberOfAttempts = 7; 828 const int kMaxNumberOfAttempts = 7;
829 const int kMinNumberOfAttempts = 2; 829 const int kMinNumberOfAttempts = 2;
830 for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) { 830 for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
831 if (!CollectGarbage(MARK_COMPACTOR, gc_reason, NULL, 831 if (!CollectGarbage(MARK_COMPACTOR, gc_reason, NULL,
832 v8::kGCCallbackFlagForced) && 832 v8::kGCCallbackFlagForced) &&
833 attempt + 1 >= kMinNumberOfAttempts) { 833 attempt + 1 >= kMinNumberOfAttempts) {
834 break; 834 break;
835 } 835 }
836 } 836 }
837 mark_compact_collector()->SetFlags(kNoGCFlags); 837 set_current_gc_flags(kNoGCFlags);
838 new_space_.Shrink(); 838 new_space_.Shrink();
839 UncommitFromSpace(); 839 UncommitFromSpace();
840 } 840 }
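Distilled, the retry policy above is: always run at least kMinNumberOfAttempts major GCs, keep going while a cycle reports it is likely to collect more, and cap the total at kMaxNumberOfAttempts. A standalone sketch of that pattern (the function name and std::function signature are hypothetical, chosen for illustration):

#include <functional>

// collect() runs one major GC and returns true if the next GC is
// likely to collect more, e.g. because weak callbacks freed new garbage.
void CollectUntilQuiescent(const std::function<bool()>& collect) {
  const int kMaxNumberOfAttempts = 7;
  const int kMinNumberOfAttempts = 2;
  for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
    // Stop early once a cycle is unlikely to collect more, but never
    // before the minimum number of attempts has been made.
    if (!collect() && attempt + 1 >= kMinNumberOfAttempts) break;
  }
}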
841 841
842 842
843 void Heap::EnsureFillerObjectAtTop() { 843 void Heap::EnsureFillerObjectAtTop() {
844 // There may be an allocation memento behind every object in new space. 844 // There may be an allocation memento behind every object in new space.
845 // If we evacuate a not full new space or if we are on the last page of 845 // If we evacuate a not full new space or if we are on the last page of
846 // the new space, then there may be uninitialized memory behind the top 846 // the new space, then there may be uninitialized memory behind the top
847 // pointer of the new space page. We store a filler object there to 847 // pointer of the new space page. We store a filler object there to
(...skipping 27 matching lines...)
875 #endif 875 #endif
876 876
877 EnsureFillerObjectAtTop(); 877 EnsureFillerObjectAtTop();
878 878
879 if (collector == SCAVENGER && !incremental_marking()->IsStopped()) { 879 if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
880 if (FLAG_trace_incremental_marking) { 880 if (FLAG_trace_incremental_marking) {
881 PrintF("[IncrementalMarking] Scavenge during marking.\n"); 881 PrintF("[IncrementalMarking] Scavenge during marking.\n");
882 } 882 }
883 } 883 }
884 884
885 if (collector == MARK_COMPACTOR && 885 if (collector == MARK_COMPACTOR && !ShouldFinalizeIncrementalMarking() &&
886 !mark_compact_collector()->finalize_incremental_marking() && 886 !ShouldAbortIncrementalMarking() && !incremental_marking()->IsStopped() &&
887 !mark_compact_collector()->abort_incremental_marking() &&
888 !incremental_marking()->IsStopped() &&
889 !incremental_marking()->should_hurry() && FLAG_incremental_marking) { 887 !incremental_marking()->should_hurry() && FLAG_incremental_marking) {
890 // Make progress in incremental marking. 888 // Make progress in incremental marking.
891 const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB; 889 const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB;
892 incremental_marking()->Step(kStepSizeWhenDelayedByScavenge, 890 incremental_marking()->Step(kStepSizeWhenDelayedByScavenge,
893 IncrementalMarking::NO_GC_VIA_STACK_GUARD); 891 IncrementalMarking::NO_GC_VIA_STACK_GUARD);
894 if (!incremental_marking()->IsComplete() && 892 if (!incremental_marking()->IsComplete() &&
895 !mark_compact_collector_.marking_deque_.IsEmpty() && !FLAG_gc_global) { 893 !mark_compact_collector_.marking_deque_.IsEmpty() && !FLAG_gc_global) {
896 if (FLAG_trace_incremental_marking) { 894 if (FLAG_trace_incremental_marking) {
897 PrintF("[IncrementalMarking] Delaying MarkSweep.\n"); 895 PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
898 } 896 }
(...skipping 50 matching lines...)
949 tracer()->Stop(collector); 947 tracer()->Stop(collector);
950 } 948 }
951 949
952 if (collector == MARK_COMPACTOR && 950 if (collector == MARK_COMPACTOR &&
953 (gc_callback_flags & kGCCallbackFlagForced) != 0) { 951 (gc_callback_flags & kGCCallbackFlagForced) != 0) {
954 isolate()->CountUsage(v8::Isolate::kForcedGC); 952 isolate()->CountUsage(v8::Isolate::kForcedGC);
955 } 953 }
956 954
957 // Start incremental marking for the next cycle. The heap snapshot 955 // Start incremental marking for the next cycle. The heap snapshot
958 // generator needs incremental marking to stay off after it aborted. 956 // generator needs incremental marking to stay off after it aborted.
959 if (!mark_compact_collector()->abort_incremental_marking() && 957 if (!ShouldAbortIncrementalMarking() && incremental_marking()->IsStopped() &&
960 incremental_marking()->IsStopped() &&
961 incremental_marking()->ShouldActivateEvenWithoutIdleNotification()) { 958 incremental_marking()->ShouldActivateEvenWithoutIdleNotification()) {
962 incremental_marking()->Start(kNoGCFlags, kNoGCCallbackFlags, "GC epilogue"); 959 incremental_marking()->Start(kNoGCFlags, kNoGCCallbackFlags, "GC epilogue");
963 } 960 }
964 961
965 return next_gc_likely_to_collect_more; 962 return next_gc_likely_to_collect_more;
966 } 963 }
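The same replacement happens inside CollectGarbage: the two mark_compact_collector() flag queries become Heap predicates. Pulled out of the hunk above for readability, the condition under which a requested full GC is deferred in favor of one more incremental-marking step looks like this (the helper name ShouldDelayMarkCompact is hypothetical; the accessors are the ones used in the diff):

// Defer a requested MARK_COMPACTOR collection and make incremental
// progress instead, mirroring the condition in the hunk above.
bool ShouldDelayMarkCompact(Heap* heap, IncrementalMarking* marking) {
  return !heap->ShouldFinalizeIncrementalMarking() &&
         !heap->ShouldAbortIncrementalMarking() &&
         !marking->IsStopped() &&
         !marking->should_hurry() &&
         FLAG_incremental_marking;
}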
967 964
968 965
969 int Heap::NotifyContextDisposed(bool dependant_context) { 966 int Heap::NotifyContextDisposed(bool dependant_context) {
970 if (!dependant_context) { 967 if (!dependant_context) {
(...skipping 4686 matching lines...)
5657 // memory-constrained devices. 5654 // memory-constrained devices.
5658 if (max_old_generation_size_ <= kMaxOldSpaceSizeMediumMemoryDevice || 5655 if (max_old_generation_size_ <= kMaxOldSpaceSizeMediumMemoryDevice ||
5659 FLAG_optimize_for_size) { 5656 FLAG_optimize_for_size) {
5660 factor = Min(factor, kMaxHeapGrowingFactorMemoryConstrained); 5657 factor = Min(factor, kMaxHeapGrowingFactorMemoryConstrained);
5661 } 5658 }
5662 5659
5663 if (memory_reducer_.ShouldGrowHeapSlowly() || optimize_for_memory_usage_) { 5660 if (memory_reducer_.ShouldGrowHeapSlowly() || optimize_for_memory_usage_) {
5664 factor = Min(factor, kConservativeHeapGrowingFactor); 5661 factor = Min(factor, kConservativeHeapGrowingFactor);
5665 } 5662 }
5666 5663
5667 if (FLAG_stress_compaction || 5664 if (FLAG_stress_compaction || ShouldReduceMemory()) {
5668 mark_compact_collector()->reduce_memory_footprint_) {
5669 factor = kMinHeapGrowingFactor; 5665 factor = kMinHeapGrowingFactor;
5670 } 5666 }
5671 5667
5672 old_generation_allocation_limit_ = 5668 old_generation_allocation_limit_ =
5673 CalculateOldGenerationAllocationLimit(factor, old_gen_size); 5669 CalculateOldGenerationAllocationLimit(factor, old_gen_size);
5674 5670
5675 if (FLAG_trace_gc_verbose) { 5671 if (FLAG_trace_gc_verbose) {
5676 PrintIsolate(isolate_, "Grow: old size: %" V8_PTR_PREFIX 5672 PrintIsolate(isolate_, "Grow: old size: %" V8_PTR_PREFIX
5677 "d KB, new limit: %" V8_PTR_PREFIX "d KB (%.1f)\n", 5673 "d KB, new limit: %" V8_PTR_PREFIX "d KB (%.1f)\n",
5678 old_gen_size / KB, old_generation_allocation_limit_ / KB, 5674 old_gen_size / KB, old_generation_allocation_limit_ / KB,
(...skipping 1224 matching lines...)
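In the heap-growing hunk above, ShouldReduceMemory() likewise replaces the collector's reduce_memory_footprint_ field. For a sense of scale, a rough sketch of how the clamped factor becomes the new limit, assuming the simplest form limit ≈ factor × old generation size (the real CalculateOldGenerationAllocationLimit also enforces minimum slack and an overall cap, elided here; the factor values in the comment are stated from memory as assumptions):

#include <cstdint>

// With memory reduction requested the factor is clamped to
// kMinHeapGrowingFactor (roughly 1.1 in V8 of this era), so a 128 MB
// old generation gets a limit near 141 MB instead of the ~512 MB a
// maximum factor of about 4.0 would allow.
intptr_t NewOldGenerationLimit(double factor, intptr_t old_gen_size) {
  return static_cast<intptr_t>(factor * old_gen_size);
}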
6903 *object_type = "CODE_TYPE"; \ 6899 *object_type = "CODE_TYPE"; \
6904 *object_sub_type = "CODE_AGE/" #name; \ 6900 *object_sub_type = "CODE_AGE/" #name; \
6905 return true; 6901 return true;
6906 CODE_AGE_LIST_COMPLETE(COMPARE_AND_RETURN_NAME) 6902 CODE_AGE_LIST_COMPLETE(COMPARE_AND_RETURN_NAME)
6907 #undef COMPARE_AND_RETURN_NAME 6903 #undef COMPARE_AND_RETURN_NAME
6908 } 6904 }
6909 return false; 6905 return false;
6910 } 6906 }
6911 } // namespace internal 6907 } // namespace internal
6912 } // namespace v8 6908 } // namespace v8
OLD | NEW
