Chromium Code Reviews

Unified Diff: src/heap.cc

Issue 352763002: Grow heap slower if GC freed many global handles. (Closed)
Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Add comment (created 6 years, 5 months ago)
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "src/v8.h"

 #include "src/accessors.h"
 #include "src/api.h"
 #include "src/base/once.h"
 #include "src/bootstrapper.h"
(...skipping 43 matching lines...)
       reserved_semispace_size_(8 * (kPointerSize / 4) * MB),
       max_semi_space_size_(8 * (kPointerSize / 4) * MB),
       initial_semispace_size_(Page::kPageSize),
       max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
       max_executable_size_(256ul * (kPointerSize / 4) * MB),
       // Variables set based on semispace_size_ and old_generation_size_ in
       // ConfigureHeap.
       // Will be 4 * reserved_semispace_size_ to ensure that young
       // generation can be aligned to its size.
       maximum_committed_(0),
-      old_space_growing_factor_(4),
       survived_since_last_expansion_(0),
       sweep_generation_(0),
       always_allocate_scope_depth_(0),
       contexts_disposed_(0),
       global_ic_age_(0),
       flush_monomorphic_ics_(false),
       scan_on_scavenge_pages_(0),
       new_space_(this),
       old_pointer_space_(NULL),
       old_data_space_(NULL),
       code_space_(NULL),
       map_space_(NULL),
       cell_space_(NULL),
       property_cell_space_(NULL),
       lo_space_(NULL),
       gc_state_(NOT_IN_GC),
       gc_post_processing_depth_(0),
       allocations_count_(0),
       raw_allocations_hash_(0),
       dump_allocations_hash_countdown_(FLAG_dump_allocations_digest_at_alloc),
       ms_count_(0),
       gc_count_(0),
       remembered_unmapped_pages_index_(0),
       unflattened_strings_length_(0),
 #ifdef DEBUG
       allocation_timeout_(0),
 #endif  // DEBUG
       old_generation_allocation_limit_(kMinimumOldGenerationAllocationLimit),
-      size_of_old_gen_at_last_old_space_gc_(0),
       old_gen_exhausted_(false),
       inline_allocation_disabled_(false),
       store_buffer_rebuilder_(store_buffer()),
       hidden_string_(NULL),
       gc_safe_size_of_old_object_(NULL),
       total_regexp_code_generated_(0),
       tracer_(NULL),
       high_survival_rate_period_length_(0),
       promoted_objects_size_(0),
       promotion_rate_(0),
(...skipping 945 matching lines...)
     high_survival_rate_period_length_++;
   } else {
     high_survival_rate_period_length_ = 0;
   }
 }

 bool Heap::PerformGarbageCollection(
     GarbageCollector collector,
     GCTracer* tracer,
     const v8::GCCallbackFlags gc_callback_flags) {
-  bool next_gc_likely_to_collect_more = false;
+  int freed_global_handles = 0;

   if (collector != SCAVENGER) {
     PROFILE(isolate_, CodeMovingGCEvent());
   }

 #ifdef VERIFY_HEAP
   if (FLAG_verify_heap) {
     VerifyStringTable(this);
   }
 #endif
(...skipping 19 matching lines...)
     // We speed up the incremental marker if it is running so that it
     // does not fall behind the rate of promotion, which would cause a
     // constantly growing old space.
     incremental_marking()->NotifyOfHighPromotionRate();
   }

   if (collector == MARK_COMPACTOR) {
     // Perform mark-sweep with optional compaction.
     MarkCompact(tracer);
     sweep_generation_++;
-
-    size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSizeOfObjects();
-
+    // Temporarily set the limit for the case when
+    // PostGarbageCollectionProcessing allocates and triggers GC. The real
+    // limit is set after PostGarbageCollectionProcessing.
     old_generation_allocation_limit_ =
-        OldGenerationAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
-
+        OldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(), 0);
     old_gen_exhausted_ = false;
   } else {
     tracer_ = tracer;
     Scavenge();
     tracer_ = NULL;
   }

   UpdateSurvivalStatistics(start_new_space_size);

   isolate_->counters()->objs_since_last_young()->Set(0);

   // Callbacks that fire after this point might trigger nested GCs and
   // restart incremental marking, the assertion can't be moved down.
   ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped());

   gc_post_processing_depth_++;
   { AllowHeapAllocation allow_allocation;
     GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
-    next_gc_likely_to_collect_more =
+    freed_global_handles =
         isolate_->global_handles()->PostGarbageCollectionProcessing(
             collector, tracer);
   }
   gc_post_processing_depth_--;

   isolate_->eternal_handles()->PostGarbageCollectionProcessing(this);

   // Update relocatables.
   Relocatable::PostGarbageCollectionProcessing(isolate_);

   if (collector == MARK_COMPACTOR) {
     // Register the amount of external allocated memory.
     amount_of_external_allocated_memory_at_last_global_gc_ =
         amount_of_external_allocated_memory_;
+    old_generation_allocation_limit_ =
+        OldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(),
+                                     freed_global_handles);
   }

   { GCCallbacksScope scope(this);
     if (scope.CheckReenter()) {
       AllowHeapAllocation allow_allocation;
       GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
       VMState<EXTERNAL> state(isolate_);
       HandleScope handle_scope(isolate_);
       CallGCEpilogueCallbacks(gc_type, gc_callback_flags);
     }
   }

 #ifdef VERIFY_HEAP
   if (FLAG_verify_heap) {
     VerifyStringTable(this);
   }
 #endif

-  return next_gc_likely_to_collect_more;
+  return freed_global_handles > 0;
 }
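
For orientation (not part of the patch): the hunks above replace the old boolean next_gc_likely_to_collect_more with a freed-handle count and recompute the allocation limit twice, once before and once after weak-handle processing. Below is a minimal standalone sketch of that ordering; all names here (HeapSketch, PromotedSpaceSize, ProcessWeakGlobalHandles, ComputeLimit) are simplified assumptions for illustration, not the real V8 API.

// Illustration only: a simplified standalone sketch (not the real Heap API)
// of the two-phase limit update performed around weak-handle processing.
#include <cstdint>

struct HeapSketch {
  std::intptr_t limit_ = 0;

  // Hypothetical stand-ins for the real heap hooks.
  std::intptr_t PromotedSpaceSize() { return std::intptr_t(100) << 20; }
  int ProcessWeakGlobalHandles() { return 1500; }
  // Factor computation elided here; see OldGenerationAllocationLimit below.
  std::intptr_t ComputeLimit(std::intptr_t old_gen, int freed_handles) {
    return freed_handles >= 1000 ? old_gen + old_gen / 10 : old_gen * 4;
  }

  // Mirrors the order of operations in Heap::PerformGarbageCollection.
  bool PerformMarkCompactSketch() {
    // 1. Provisional limit, computed as if no handles were freed, so that a
    //    GC triggered during weak-handle processing sees a fresh value.
    limit_ = ComputeLimit(PromotedSpaceSize(), 0);
    // 2. Weak-handle processing may allocate and even trigger a nested GC.
    int freed = ProcessWeakGlobalHandles();
    // 3. Real limit: grow slower when many handles were just released.
    limit_ = ComputeLimit(PromotedSpaceSize(), freed);
    // The boolean the caller previously received is now derived from the count.
    return freed > 0;
  }
};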


 void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
   for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
     if (gc_type & gc_prologue_callbacks_[i].gc_type) {
       if (!gc_prologue_callbacks_[i].pass_isolate_) {
         v8::GCPrologueCallback callback =
             reinterpret_cast<v8::GCPrologueCallback>(
                 gc_prologue_callbacks_[i].callback);
(...skipping 3815 matching lines...)
                      max_old_generation_size_);

   // We rely on being able to allocate new arrays in paged spaces.
   ASSERT(Page::kMaxRegularHeapObjectSize >=
          (JSArray::kSize +
           FixedArray::SizeFor(JSObject::kInitialMaxFastElementArray) +
           AllocationMemento::kSize));

   code_range_size_ = code_range_size * MB;

-  // We set the old generation growing factor to 2 to grow the heap slower on
-  // memory-constrained devices.
-  if (max_old_generation_size_ <= kMaxOldSpaceSizeMediumMemoryDevice) {
-    old_space_growing_factor_ = 2;
-  }
-
   configured_ = true;
   return true;
 }


 bool Heap::ConfigureHeapDefault() {
   return ConfigureHeap(0, 0, 0, 0);
 }


(...skipping 48 matching lines...)


 int64_t Heap::PromotedExternalMemorySize() {
   if (amount_of_external_allocated_memory_
       <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
   return amount_of_external_allocated_memory_
       - amount_of_external_allocated_memory_at_last_global_gc_;
 }


+intptr_t Heap::OldGenerationAllocationLimit(intptr_t old_gen_size,
+                                            int freed_global_handles) {
+  const int kMaxHandles = 1000;
+  const int kMinHandles = 100;
+  double min_factor = 1.1;
+  double max_factor = 4;
+  // We set the old generation growing factor to 2 to grow the heap slower on
+  // memory-constrained devices.
+  if (max_old_generation_size_ <= kMaxOldSpaceSizeMediumMemoryDevice) {
+    max_factor = 2;
+  }
+  // If there are many freed global handles, then the next full GC will
+  // likely collect a lot of garbage. Choose the heap growing factor
+  // depending on freed global handles.
+  // TODO(ulan, hpayer): Take into account mutator utilization.
+  double factor;
+  if (freed_global_handles <= kMinHandles) {
+    factor = max_factor;
+  } else if (freed_global_handles >= kMaxHandles) {
+    factor = min_factor;
+  } else {
+    // Compute factor using linear interpolation between points
+    // (kMinHandles, max_factor) and (kMaxHandles, min_factor).
+    factor = max_factor -
+             (freed_global_handles - kMinHandles) * (max_factor - min_factor) /
+             (kMaxHandles - kMinHandles);
+  }
+
+  if (FLAG_stress_compaction ||
+      mark_compact_collector()->reduce_memory_footprint_) {
+    factor = min_factor;
+  }
+
+  intptr_t limit = static_cast<intptr_t>(old_gen_size * factor);
+  limit = Max(limit, kMinimumOldGenerationAllocationLimit);
+  limit += new_space_.Capacity();
+  intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
+  return Min(limit, halfway_to_the_max);
+}
+
+
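For concreteness (not part of the patch): the new function grows the old generation by a factor interpolated from the number of freed global handles. The small self-contained sketch below reproduces only that interpolation; the constants are copied from the hunk, while GrowingFactor and the main driver are hypothetical extractions for illustration. With the default maximum factor of 4, 100 or fewer freed handles keep the full factor, 1000 or more clamp it to 1.1, and 550 lands at about 2.55.

// Illustration only: the growing-factor interpolation from the new
// Heap::OldGenerationAllocationLimit, extracted into a free function.
#include <cstdio>
#include <initializer_list>

double GrowingFactor(int freed_global_handles, double max_factor = 4.0) {
  const int kMaxHandles = 1000;
  const int kMinHandles = 100;
  const double min_factor = 1.1;
  if (freed_global_handles <= kMinHandles) return max_factor;
  if (freed_global_handles >= kMaxHandles) return min_factor;
  // Linear interpolation between (kMinHandles, max_factor)
  // and (kMaxHandles, min_factor).
  return max_factor -
         (freed_global_handles - kMinHandles) * (max_factor - min_factor) /
             (kMaxHandles - kMinHandles);
}

int main() {
  // Prints: 0 and 100 freed handles -> 4.00; 550 -> 2.55; 1000+ -> 1.10.
  for (int handles : {0, 100, 550, 1000, 5000}) {
    std::printf("freed=%d factor=%.2f\n", handles, GrowingFactor(handles));
  }
}

The limit itself is then old_gen_size * factor (at least kMinimumOldGenerationAllocationLimit) plus the new-space capacity, capped at the halfway point between the current promoted size and max_old_generation_size_.
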
 void Heap::EnableInlineAllocation() {
   if (!inline_allocation_disabled_) return;
   inline_allocation_disabled_ = false;

   // Update inline allocation limit for new space.
   new_space()->UpdateInlineAllocationLimit(0);
 }


 void Heap::DisableInlineAllocation() {
(...skipping 1324 matching lines...)
                            static_cast<int>(object_sizes_last_time_[index]));
   CODE_AGE_LIST_COMPLETE(ADJUST_LAST_TIME_OBJECT_COUNT)
 #undef ADJUST_LAST_TIME_OBJECT_COUNT

   MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
   MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
   ClearObjectStats();
 }

 } }  // namespace v8::internal