OLD | NEW |
---|---|
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/v8.h" | 5 #include "src/v8.h" |
6 | 6 |
7 #include "src/accessors.h" | 7 #include "src/accessors.h" |
8 #include "src/api.h" | 8 #include "src/api.h" |
9 #include "src/base/bits.h" | 9 #include "src/base/bits.h" |
10 #include "src/base/once.h" | 10 #include "src/base/once.h" |
(...skipping 48 matching lines...) | |
59 : amount_of_external_allocated_memory_(0), | 59 : amount_of_external_allocated_memory_(0), |
60 amount_of_external_allocated_memory_at_last_global_gc_(0), | 60 amount_of_external_allocated_memory_at_last_global_gc_(0), |
61 isolate_(NULL), | 61 isolate_(NULL), |
62 code_range_size_(0), | 62 code_range_size_(0), |
63 // semispace_size_ should be a power of 2 and old_generation_size_ should | 63 // semispace_size_ should be a power of 2 and old_generation_size_ should |
64 // be a multiple of Page::kPageSize. | 64 // be a multiple of Page::kPageSize. |
65 reserved_semispace_size_(8 * (kPointerSize / 4) * MB), | 65 reserved_semispace_size_(8 * (kPointerSize / 4) * MB), |
66 max_semi_space_size_(8 * (kPointerSize / 4) * MB), | 66 max_semi_space_size_(8 * (kPointerSize / 4) * MB), |
67 initial_semispace_size_(Page::kPageSize), | 67 initial_semispace_size_(Page::kPageSize), |
68 target_semispace_size_(Page::kPageSize), | 68 target_semispace_size_(Page::kPageSize), |
69 max_old_generation_size_(700ul * (kPointerSize / 4) * MB), | 69 max_old_generation_size_(kDefaultMaxOldGenSize), |
70 initial_old_generation_size_(max_old_generation_size_ / | 70 initial_old_generation_size_(max_old_generation_size_ / |
71 kInitalOldGenerationLimitFactor), | 71 kInitalOldGenerationLimitFactor), |
72 old_generation_size_configured_(false), | 72 old_generation_size_configured_(false), |
73 max_executable_size_(256ul * (kPointerSize / 4) * MB), | 73 max_executable_size_(256ul * (kPointerSize / 4) * MB), |
74 // Variables set based on semispace_size_ and old_generation_size_ in | 74 // Variables set based on semispace_size_ and old_generation_size_ in |
75 // ConfigureHeap. | 75 // ConfigureHeap. |
76 // Will be 4 * reserved_semispace_size_ to ensure that young | 76 // Will be 4 * reserved_semispace_size_ to ensure that young |
77 // generation can be aligned to its size. | 77 // generation can be aligned to its size. |
78 maximum_committed_(0), | 78 maximum_committed_(0), |
79 survived_since_last_expansion_(0), | 79 survived_since_last_expansion_(0), |
(...skipping 16 matching lines...) | |
96 raw_allocations_hash_(0), | 96 raw_allocations_hash_(0), |
97 dump_allocations_hash_countdown_(FLAG_dump_allocations_digest_at_alloc), | 97 dump_allocations_hash_countdown_(FLAG_dump_allocations_digest_at_alloc), |
98 ms_count_(0), | 98 ms_count_(0), |
99 gc_count_(0), | 99 gc_count_(0), |
100 remembered_unmapped_pages_index_(0), | 100 remembered_unmapped_pages_index_(0), |
101 unflattened_strings_length_(0), | 101 unflattened_strings_length_(0), |
102 #ifdef DEBUG | 102 #ifdef DEBUG |
103 allocation_timeout_(0), | 103 allocation_timeout_(0), |
104 #endif // DEBUG | 104 #endif // DEBUG |
105 old_generation_allocation_limit_(initial_old_generation_size_), | 105 old_generation_allocation_limit_(initial_old_generation_size_), |
| 106 old_generation_committed_memory_limit_(kDefaultMaxOldGenSize >> 1), |
106 old_gen_exhausted_(false), | 107 old_gen_exhausted_(false), |
107 inline_allocation_disabled_(false), | 108 inline_allocation_disabled_(false), |
108 store_buffer_rebuilder_(store_buffer()), | 109 store_buffer_rebuilder_(store_buffer()), |
109 hidden_string_(NULL), | 110 hidden_string_(NULL), |
110 gc_safe_size_of_old_object_(NULL), | 111 gc_safe_size_of_old_object_(NULL), |
111 total_regexp_code_generated_(0), | 112 total_regexp_code_generated_(0), |
112 tracer_(this), | 113 tracer_(this), |
113 high_survival_rate_period_length_(0), | 114 high_survival_rate_period_length_(0), |
114 promoted_objects_size_(0), | 115 promoted_objects_size_(0), |
115 promotion_ratio_(0), | 116 promotion_ratio_(0), |
(...skipping 738 matching lines...) | |
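For readers skimming the initializer changes above, the numbers work out roughly as follows. This is an illustrative sketch, not part of the CL: the constant names are local stand-ins, and kDefaultMaxOldGenSize is assumed to keep the value of the old inline expression it replaces on OLD line 69.

```cpp
#include <cstddef>

// Illustrative stand-ins for the defaults set up in the Heap constructor.
// Assumption: kDefaultMaxOldGenSize (introduced by this CL) equals the
// 700ul * (kPointerSize / 4) * MB expression it replaces.
constexpr std::size_t MB = 1024 * 1024;
constexpr std::size_t kPointerSize = sizeof(void*);  // 8 on x64, 4 on ia32

constexpr std::size_t kMaxSemiSpaceSizeSketch =
    8 * (kPointerSize / 4) * MB;        // 16 MB on x64, 8 MB on ia32

constexpr std::size_t kDefaultMaxOldGenSizeSketch =
    700ul * (kPointerSize / 4) * MB;    // 1400 MB on x64, 700 MB on ia32

// The new old_generation_committed_memory_limit_ field (NEW line 106) starts
// at half of that maximum, until SetOldGenerationAllocationLimit recomputes it
// after the first full GC:
constexpr std::size_t kInitialCommittedLimitSketch =
    kDefaultMaxOldGenSizeSketch >> 1;   // 700 MB on x64, 350 MB on ia32
```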
854 | 855 |
855 EnsureFillerObjectAtTop(); | 856 EnsureFillerObjectAtTop(); |
856 | 857 |
857 if (collector == SCAVENGER && !incremental_marking()->IsStopped()) { | 858 if (collector == SCAVENGER && !incremental_marking()->IsStopped()) { |
858 if (FLAG_trace_incremental_marking) { | 859 if (FLAG_trace_incremental_marking) { |
859 PrintF("[IncrementalMarking] Scavenge during marking.\n"); | 860 PrintF("[IncrementalMarking] Scavenge during marking.\n"); |
860 } | 861 } |
861 } | 862 } |
862 | 863 |
863 if (collector == MARK_COMPACTOR && | 864 if (collector == MARK_COMPACTOR && |
864 !mark_compact_collector()->abort_incremental_marking() && | 865 !mark_compact_collector()->incremental_marking_abort_requested() && |
865 !incremental_marking()->IsStopped() && | 866 !incremental_marking()->IsStopped() && |
866 !incremental_marking()->should_hurry() && | 867 !incremental_marking()->should_hurry() && |
867 FLAG_incremental_marking_steps) { | 868 FLAG_incremental_marking_steps) { |
868 // Make progress in incremental marking. | 869 // Make progress in incremental marking. |
869 const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB; | 870 const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB; |
870 incremental_marking()->Step(kStepSizeWhenDelayedByScavenge, | 871 incremental_marking()->Step(kStepSizeWhenDelayedByScavenge, |
871 IncrementalMarking::NO_GC_VIA_STACK_GUARD); | 872 IncrementalMarking::NO_GC_VIA_STACK_GUARD); |
872 if (!incremental_marking()->IsComplete() && | 873 if (!incremental_marking()->IsComplete() && |
873 !mark_compact_collector_.marking_deque_.IsEmpty() && !FLAG_gc_global) { | 874 !mark_compact_collector_.marking_deque_.IsEmpty() && !FLAG_gc_global) { |
874 if (FLAG_trace_incremental_marking) { | 875 if (FLAG_trace_incremental_marking) { |
(...skipping 22 matching lines...) | |
897 | 898 |
898 GarbageCollectionEpilogue(); | 899 GarbageCollectionEpilogue(); |
899 if (collector == MARK_COMPACTOR && FLAG_track_detached_contexts) { | 900 if (collector == MARK_COMPACTOR && FLAG_track_detached_contexts) { |
900 isolate()->CheckDetachedContextsAfterGC(); | 901 isolate()->CheckDetachedContextsAfterGC(); |
901 } | 902 } |
902 tracer()->Stop(collector); | 903 tracer()->Stop(collector); |
903 } | 904 } |
904 | 905 |
905 // Start incremental marking for the next cycle. The heap snapshot | 906 // Start incremental marking for the next cycle. The heap snapshot |
906 // generator needs incremental marking to stay off after it aborted. | 907 // generator needs incremental marking to stay off after it aborted. |
907 if (!mark_compact_collector()->abort_incremental_marking() && | 908 if (!mark_compact_collector()->incremental_marking_abort_requested() && |
908 WorthActivatingIncrementalMarking()) { | 909 incremental_marking()->IsStopped() && |
| 910 incremental_marking()->ShouldActivate()) { |
909 incremental_marking()->Start(); | 911 incremental_marking()->Start(); |
910 } | 912 } |
911 | 913 |
912 return next_gc_likely_to_collect_more; | 914 return next_gc_likely_to_collect_more; |
913 } | 915 } |
914 | 916 |
915 | 917 |
916 int Heap::NotifyContextDisposed(bool dependant_context) { | 918 int Heap::NotifyContextDisposed(bool dependant_context) { |
917 if (!dependant_context) { | 919 if (!dependant_context) { |
918 tracer()->ResetSurvivalEvents(); | 920 tracer()->ResetSurvivalEvents(); |
(...skipping 236 matching lines...) | |
1155 incremental_marking()->NotifyOfHighPromotionRate(); | 1157 incremental_marking()->NotifyOfHighPromotionRate(); |
1156 } | 1158 } |
1157 | 1159 |
1158 if (collector == MARK_COMPACTOR) { | 1160 if (collector == MARK_COMPACTOR) { |
1159 // Perform mark-sweep with optional compaction. | 1161 // Perform mark-sweep with optional compaction. |
1160 MarkCompact(); | 1162 MarkCompact(); |
1161 sweep_generation_++; | 1163 sweep_generation_++; |
1162 // Temporarily set the limit for the case when PostGarbageCollectionProcessing | 1164 // Temporarily set the limit for the case when PostGarbageCollectionProcessing |
1163 // allocates and triggers GC. The real limit is set after | 1165 // allocates and triggers GC. The real limit is set after |
1164 // PostGarbageCollectionProcessing. | 1166 // PostGarbageCollectionProcessing. |
1165 old_generation_allocation_limit_ = | 1167 SetOldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(), 0, false); |
1166 OldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(), 0); | |
1167 old_gen_exhausted_ = false; | 1168 old_gen_exhausted_ = false; |
1168 old_generation_size_configured_ = true; | 1169 old_generation_size_configured_ = true; |
1169 } else { | 1170 } else { |
1170 Scavenge(); | 1171 Scavenge(); |
1171 } | 1172 } |
1172 | 1173 |
1173 UpdateSurvivalStatistics(start_new_space_size); | 1174 UpdateSurvivalStatistics(start_new_space_size); |
1174 ConfigureInitialOldGenerationSize(); | 1175 ConfigureInitialOldGenerationSize(); |
1175 | 1176 |
1176 isolate_->counters()->objs_since_last_young()->Set(0); | 1177 isolate_->counters()->objs_since_last_young()->Set(0); |
(...skipping 13 matching lines...) | |
1190 | 1191 |
1191 isolate_->eternal_handles()->PostGarbageCollectionProcessing(this); | 1192 isolate_->eternal_handles()->PostGarbageCollectionProcessing(this); |
1192 | 1193 |
1193 // Update relocatables. | 1194 // Update relocatables. |
1194 Relocatable::PostGarbageCollectionProcessing(isolate_); | 1195 Relocatable::PostGarbageCollectionProcessing(isolate_); |
1195 | 1196 |
1196 if (collector == MARK_COMPACTOR) { | 1197 if (collector == MARK_COMPACTOR) { |
1197 // Register the amount of external allocated memory. | 1198 // Register the amount of external allocated memory. |
1198 amount_of_external_allocated_memory_at_last_global_gc_ = | 1199 amount_of_external_allocated_memory_at_last_global_gc_ = |
1199 amount_of_external_allocated_memory_; | 1200 amount_of_external_allocated_memory_; |
1200 old_generation_allocation_limit_ = OldGenerationAllocationLimit( | 1201 SetOldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(), |
1201 PromotedSpaceSizeOfObjects(), freed_global_handles); | 1202 freed_global_handles, true); |
1202 // We finished a marking cycle. We can uncommit the marking deque until | 1203 // We finished a marking cycle. We can uncommit the marking deque until |
1203 // we start marking again. | 1204 // we start marking again. |
1204 mark_compact_collector_.UncommitMarkingDeque(); | 1205 mark_compact_collector_.UncommitMarkingDeque(); |
1205 } | 1206 } |
1206 | 1207 |
1207 { | 1208 { |
1208 GCCallbacksScope scope(this); | 1209 GCCallbacksScope scope(this); |
1209 if (scope.CheckReenter()) { | 1210 if (scope.CheckReenter()) { |
1210 AllowHeapAllocation allow_allocation; | 1211 AllowHeapAllocation allow_allocation; |
1211 GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL); | 1212 GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL); |
(...skipping 3370 matching lines...) | |
4582 gc_idle_time_handler_.ShouldDoFinalIncrementalMarkCompact( | 4583 gc_idle_time_handler_.ShouldDoFinalIncrementalMarkCompact( |
4583 static_cast<size_t>(idle_time_in_ms), size_of_objects, | 4584 static_cast<size_t>(idle_time_in_ms), size_of_objects, |
4584 final_incremental_mark_compact_speed_in_bytes_per_ms))) { | 4585 final_incremental_mark_compact_speed_in_bytes_per_ms))) { |
4585 CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental"); | 4586 CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental"); |
4586 return true; | 4587 return true; |
4587 } | 4588 } |
4588 return false; | 4589 return false; |
4589 } | 4590 } |
4590 | 4591 |
4591 | 4592 |
4592 bool Heap::WorthActivatingIncrementalMarking() { | |
4593 return incremental_marking()->IsStopped() && | |
4594 incremental_marking()->WorthActivating() && NextGCIsLikelyToBeFull(); | |
4595 } | |
4596 | |
4597 | |
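A note on the helper removed above: its IsStopped() test is now written out at the call site in Heap::CollectGarbage (NEW lines 908-910), and the remaining checks are presumably folded into IncrementalMarking::ShouldActivate(), whose implementation is outside this file. The sketch below only illustrates that assumed split; the booleans stand in for predicates this diff does not show.

```cpp
// Hypothetical sketch of the refactor -- not V8 code.
struct MarkingStateSketch {
  bool abort_requested;            // mark_compact_collector()->incremental_marking_abort_requested()
  bool is_stopped;                 // incremental_marking()->IsStopped()
  bool worth_activating;           // incremental_marking()->WorthActivating()
  bool next_gc_likely_to_be_full;  // heap->NextGCIsLikelyToBeFull()
};

// Old gate: !abort && Heap::WorthActivatingIncrementalMarking().
static bool OldGate(const MarkingStateSketch& s) {
  return !s.abort_requested && s.is_stopped && s.worth_activating &&
         s.next_gc_likely_to_be_full;
}

// New gate (NEW lines 908-910), assuming ShouldActivate() covers the last two
// checks that the deleted helper used to combine.
static bool NewGate(const MarkingStateSketch& s) {
  const bool should_activate = s.worth_activating && s.next_gc_likely_to_be_full;
  return !s.abort_requested && s.is_stopped && should_activate;
}
```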
4598 static double MonotonicallyIncreasingTimeInMs() { | 4593 static double MonotonicallyIncreasingTimeInMs() { |
4599 return V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() * | 4594 return V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() * |
4600 static_cast<double>(base::Time::kMillisecondsPerSecond); | 4595 static_cast<double>(base::Time::kMillisecondsPerSecond); |
4601 } | 4596 } |
4602 | 4597 |
4603 | 4598 |
4604 bool Heap::IdleNotification(int idle_time_in_ms) { | 4599 bool Heap::IdleNotification(int idle_time_in_ms) { |
4605 return IdleNotification( | 4600 return IdleNotification( |
4606 V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() + | 4601 V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() + |
4607 (static_cast<double>(idle_time_in_ms) / | 4602 (static_cast<double>(idle_time_in_ms) / |
(...skipping 624 matching lines...) | |
5232 | 5227 |
5233 int64_t Heap::PromotedExternalMemorySize() { | 5228 int64_t Heap::PromotedExternalMemorySize() { |
5234 if (amount_of_external_allocated_memory_ <= | 5229 if (amount_of_external_allocated_memory_ <= |
5235 amount_of_external_allocated_memory_at_last_global_gc_) | 5230 amount_of_external_allocated_memory_at_last_global_gc_) |
5236 return 0; | 5231 return 0; |
5237 return amount_of_external_allocated_memory_ - | 5232 return amount_of_external_allocated_memory_ - |
5238 amount_of_external_allocated_memory_at_last_global_gc_; | 5233 amount_of_external_allocated_memory_at_last_global_gc_; |
5239 } | 5234 } |
5240 | 5235 |
5241 | 5236 |
5242 intptr_t Heap::OldGenerationAllocationLimit(intptr_t old_gen_size, | 5237 void Heap::SetOldGenerationAllocationLimit(intptr_t old_gen_size, |
5243 int freed_global_handles) { | 5238 int freed_global_handles, |
| 5239 bool weak_callbacks_completed) { |
5244 const int kMaxHandles = 1000; | 5240 const int kMaxHandles = 1000; |
5245 const int kMinHandles = 100; | 5241 const int kMinHandles = 100; |
5246 double min_factor = 1.1; | 5242 double min_factor = 1.1; |
5247 double max_factor = 4; | 5243 double max_factor = 4; |
5248 // We set the old generation growing factor to 2 to grow the heap slower on | 5244 // We set the old generation growing factor to 2 to grow the heap slower on |
5249 // memory-constrained devices. | 5245 // memory-constrained devices. |
5250 if (max_old_generation_size_ <= kMaxOldSpaceSizeMediumMemoryDevice) { | 5246 if (max_old_generation_size_ <= kMaxOldSpaceSizeMediumMemoryDevice) { |
5251 max_factor = 2; | 5247 max_factor = 2; |
5252 } | 5248 } |
5253 // If there are many freed global handles, then the next full GC will | 5249 // If there are many freed global handles, then the next full GC will |
(...skipping 13 matching lines...) | |
5267 (kMaxHandles - kMinHandles); | 5263 (kMaxHandles - kMinHandles); |
5268 } | 5264 } |
5269 | 5265 |
5270 if (FLAG_stress_compaction || | 5266 if (FLAG_stress_compaction || |
5271 mark_compact_collector()->reduce_memory_footprint_) { | 5267 mark_compact_collector()->reduce_memory_footprint_) { |
5272 factor = min_factor; | 5268 factor = min_factor; |
5273 } | 5269 } |
5274 | 5270 |
5275 intptr_t limit = static_cast<intptr_t>(old_gen_size * factor); | 5271 intptr_t limit = static_cast<intptr_t>(old_gen_size * factor); |
5276 limit = Max(limit, kMinimumOldGenerationAllocationLimit); | 5272 limit = Max(limit, kMinimumOldGenerationAllocationLimit); |
5277 limit += new_space_.Capacity(); | 5273 |
5278 intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2; | 5274 old_generation_allocation_limit_ = limit + new_space_.Capacity(); |
ulan (2015/04/07 13:27:19): Should we enforce that old_generation_allocation_l
Erik Corry Chromium.org (2015/04/09 12:26:15): I think since we consult both, it is clearer to le
| |
5279 return Min(limit, halfway_to_the_max); | 5275 |
| 5276 // The committed memory limit is halfway from the current live object count |
| 5277 // to the max size. This means that if half of our allowed extra memory is |
| 5278 // currently taken by fragmentation we will immediately start another |
| 5279 // incremental GC. If there is no fragmentation, we will start incremental |
| 5280 // GC when we have committed half the allowed extra memory. This limit will |
| 5281 // be compared against the committed memory, ie including fragmentation. |
| 5282 intptr_t commit_limit_basis = old_gen_size; |
ulan (2015/04/07 13:27:19): This can lead to never-ending GCs even if fragment
Erik Corry Chromium.org (2015/04/09 12:26:15): Done.
| |
| 5283 if (FLAG_never_compact || !FLAG_compact_code_space) { |
| 5284 // No point in provoking GCs to get rid of fragmentation if we can't |
| 5285 // actually get rid of fragmentation. In this case set the limit higher. |
| 5286 commit_limit_basis = CommittedOldGenerationMemory(); |
| 5287 } |
| 5288 old_generation_committed_memory_limit_ = |
| 5289 commit_limit_basis / 2 + max_old_generation_size_ / 2; |
| 5290 |
| 5291 if (weak_callbacks_completed && FLAG_trace_gc) { |
| 5292 PrintPID("%8.0f ms: ", isolate()->time_millis_since_init()); |
| 5293 PrintF("Next GC at %.1f (%.1f) -> %.1f (%.1f)\n", old_gen_size * 1.0 / MB, |
| 5294 CommittedOldGenerationMemory() * 1.0 / MB, limit * 1.0 / MB, |
| 5295 old_generation_committed_memory_limit_ * 1.0 / MB); |
| 5296 } |
5280 } | 5297 } |
5281 | 5298 |
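To make the two limits computed above concrete, here is a small worked example. The numbers are invented and the formulas simplified (the new-space capacity term and the kMinimumOldGenerationAllocationLimit clamp are omitted); it only illustrates the arithmetic of the new committed-memory limit next to the existing allocation limit.

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  // Illustrative 64-bit scenario after a full GC: 1400 MB old-generation
  // maximum, 200 MB of live old-generation objects, 300 MB actually committed
  // (i.e. 100 MB of fragmentation), growing factor 4 (no memory pressure).
  const int64_t MB = 1024 * 1024;
  const int64_t max_old_generation_size = 1400 * MB;
  const int64_t old_gen_size = 200 * MB;   // PromotedSpaceSizeOfObjects()
  const int64_t committed = 300 * MB;      // CommittedOldGenerationMemory()
  const double factor = 4.0;

  // Allocation limit: live size scaled by the factor (new-space term omitted).
  const int64_t allocation_limit =
      static_cast<int64_t>(old_gen_size * factor);     // 800 MB

  // Committed-memory limit: halfway from the live size to the maximum.
  const int64_t committed_limit =
      old_gen_size / 2 + max_old_generation_size / 2;  // 800 MB

  // The allocation limit is compared against the promoted (live) size, the
  // committed limit against committed memory, so the 100 MB of fragmentation
  // shrinks the second headroom but not the first.
  std::printf("allocation headroom: %lld MB\n",
              static_cast<long long>((allocation_limit - old_gen_size) / MB));  // 600
  std::printf("committed headroom:  %lld MB\n",
              static_cast<long long>((committed_limit - committed) / MB));      // 500
  return 0;
}
```

Under these numbers, fragmentation alone moves the heap 100 MB closer to the point where another incremental GC is provoked, which is the behavior the comment at NEW lines 5276-5281 describes.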
5282 | 5299 |
5283 void Heap::EnableInlineAllocation() { | 5300 void Heap::EnableInlineAllocation() { |
5284 if (!inline_allocation_disabled_) return; | 5301 if (!inline_allocation_disabled_) return; |
5285 inline_allocation_disabled_ = false; | 5302 inline_allocation_disabled_ = false; |
5286 | 5303 |
5287 // Update inline allocation limit for new space. | 5304 // Update inline allocation limit for new space. |
5288 new_space()->UpdateInlineAllocationLimit(0); | 5305 new_space()->UpdateInlineAllocationLimit(0); |
5289 } | 5306 } |
(...skipping 1126 matching lines...) | |
6416 static_cast<int>(object_sizes_last_time_[index])); | 6433 static_cast<int>(object_sizes_last_time_[index])); |
6417 CODE_AGE_LIST_COMPLETE(ADJUST_LAST_TIME_OBJECT_COUNT) | 6434 CODE_AGE_LIST_COMPLETE(ADJUST_LAST_TIME_OBJECT_COUNT) |
6418 #undef ADJUST_LAST_TIME_OBJECT_COUNT | 6435 #undef ADJUST_LAST_TIME_OBJECT_COUNT |
6419 | 6436 |
6420 MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_)); | 6437 MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_)); |
6421 MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_)); | 6438 MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_)); |
6422 ClearObjectStats(); | 6439 ClearObjectStats(); |
6423 } | 6440 } |
6424 } | 6441 } |
6425 } // namespace v8::internal | 6442 } // namespace v8::internal |