OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/v8.h" | 5 #include "src/v8.h" |
6 | 6 |
7 #include "src/accessors.h" | 7 #include "src/accessors.h" |
8 #include "src/api.h" | 8 #include "src/api.h" |
9 #include "src/base/bits.h" | 9 #include "src/base/bits.h" |
10 #include "src/base/once.h" | 10 #include "src/base/once.h" |
(...skipping 45 matching lines...)
56 amount_of_external_allocated_memory_at_last_global_gc_(0), | 56 amount_of_external_allocated_memory_at_last_global_gc_(0), |
57 isolate_(NULL), | 57 isolate_(NULL), |
58 code_range_size_(0), | 58 code_range_size_(0), |
59 // semispace_size_ should be a power of 2 and old_generation_size_ should | 59 // semispace_size_ should be a power of 2 and old_generation_size_ should |
60 // be a multiple of Page::kPageSize. | 60 // be a multiple of Page::kPageSize. |
61 reserved_semispace_size_(8 * (kPointerSize / 4) * MB), | 61 reserved_semispace_size_(8 * (kPointerSize / 4) * MB), |
62 max_semi_space_size_(8 * (kPointerSize / 4) * MB), | 62 max_semi_space_size_(8 * (kPointerSize / 4) * MB), |
63 initial_semispace_size_(Page::kPageSize), | 63 initial_semispace_size_(Page::kPageSize), |
64 target_semispace_size_(Page::kPageSize), | 64 target_semispace_size_(Page::kPageSize), |
65 max_old_generation_size_(700ul * (kPointerSize / 4) * MB), | 65 max_old_generation_size_(700ul * (kPointerSize / 4) * MB), |
| 66 initial_old_generation_size_(max_old_generation_size_), |
| 67 old_generation_size_configured_(false), |
66 max_executable_size_(256ul * (kPointerSize / 4) * MB), | 68 max_executable_size_(256ul * (kPointerSize / 4) * MB), |
67 // Variables set based on semispace_size_ and old_generation_size_ in | 69 // Variables set based on semispace_size_ and old_generation_size_ in |
68 // ConfigureHeap. | 70 // ConfigureHeap. |
69 // Will be 4 * reserved_semispace_size_ to ensure that the young | 71 // Will be 4 * reserved_semispace_size_ to ensure that the young |
70 // generation can be aligned to its size. | 72 // generation can be aligned to its size. |
71 maximum_committed_(0), | 73 maximum_committed_(0), |
72 survived_since_last_expansion_(0), | 74 survived_since_last_expansion_(0), |
73 survived_last_scavenge_(0), | 75 survived_last_scavenge_(0), |
74 sweep_generation_(0), | 76 sweep_generation_(0), |
75 always_allocate_scope_depth_(0), | 77 always_allocate_scope_depth_(0), |
(...skipping 14 matching lines...)
90 allocations_count_(0), | 92 allocations_count_(0), |
91 raw_allocations_hash_(0), | 93 raw_allocations_hash_(0), |
92 dump_allocations_hash_countdown_(FLAG_dump_allocations_digest_at_alloc), | 94 dump_allocations_hash_countdown_(FLAG_dump_allocations_digest_at_alloc), |
93 ms_count_(0), | 95 ms_count_(0), |
94 gc_count_(0), | 96 gc_count_(0), |
95 remembered_unmapped_pages_index_(0), | 97 remembered_unmapped_pages_index_(0), |
96 unflattened_strings_length_(0), | 98 unflattened_strings_length_(0), |
97 #ifdef DEBUG | 99 #ifdef DEBUG |
98 allocation_timeout_(0), | 100 allocation_timeout_(0), |
99 #endif // DEBUG | 101 #endif // DEBUG |
100 old_generation_allocation_limit_(kMinimumOldGenerationAllocationLimit), | 102 old_generation_allocation_limit_(initial_old_generation_size_), |
101 old_gen_exhausted_(false), | 103 old_gen_exhausted_(false), |
102 inline_allocation_disabled_(false), | 104 inline_allocation_disabled_(false), |
103 store_buffer_rebuilder_(store_buffer()), | 105 store_buffer_rebuilder_(store_buffer()), |
104 hidden_string_(NULL), | 106 hidden_string_(NULL), |
105 gc_safe_size_of_old_object_(NULL), | 107 gc_safe_size_of_old_object_(NULL), |
106 total_regexp_code_generated_(0), | 108 total_regexp_code_generated_(0), |
107 tracer_(this), | 109 tracer_(this), |
108 high_survival_rate_period_length_(0), | 110 high_survival_rate_period_length_(0), |
109 promoted_objects_size_(0), | 111 promoted_objects_size_(0), |
110 promotion_rate_(0), | 112 promotion_ratio_(0), |
111 semi_space_copied_object_size_(0), | 113 semi_space_copied_object_size_(0), |
| 114 previous_semi_space_copied_object_size_(0), |
112 semi_space_copied_rate_(0), | 115 semi_space_copied_rate_(0), |
113 nodes_died_in_new_space_(0), | 116 nodes_died_in_new_space_(0), |
114 nodes_copied_in_new_space_(0), | 117 nodes_copied_in_new_space_(0), |
115 nodes_promoted_(0), | 118 nodes_promoted_(0), |
116 maximum_size_scavenges_(0), | 119 maximum_size_scavenges_(0), |
117 max_gc_pause_(0.0), | 120 max_gc_pause_(0.0), |
118 total_gc_time_ms_(0.0), | 121 total_gc_time_ms_(0.0), |
119 max_alive_after_gc_(0), | 122 max_alive_after_gc_(0), |
120 min_in_mutator_(kMaxInt), | 123 min_in_mutator_(kMaxInt), |
121 marking_time_(0.0), | 124 marking_time_(0.0), |
(...skipping 304 matching lines...)
426 | 429 |
427 #ifdef VERIFY_HEAP | 430 #ifdef VERIFY_HEAP |
428 if (FLAG_verify_heap) { | 431 if (FLAG_verify_heap) { |
429 Verify(); | 432 Verify(); |
430 } | 433 } |
431 #endif | 434 #endif |
432 } | 435 } |
433 | 436 |
434 // Reset GC statistics. | 437 // Reset GC statistics. |
435 promoted_objects_size_ = 0; | 438 promoted_objects_size_ = 0; |
| 439 previous_semi_space_copied_object_size_ = semi_space_copied_object_size_; |
436 semi_space_copied_object_size_ = 0; | 440 semi_space_copied_object_size_ = 0; |
437 nodes_died_in_new_space_ = 0; | 441 nodes_died_in_new_space_ = 0; |
438 nodes_copied_in_new_space_ = 0; | 442 nodes_copied_in_new_space_ = 0; |
439 nodes_promoted_ = 0; | 443 nodes_promoted_ = 0; |
440 | 444 |
441 UpdateMaximumCommitted(); | 445 UpdateMaximumCommitted(); |
442 | 446 |
443 #ifdef DEBUG | 447 #ifdef DEBUG |
444 DCHECK(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC); | 448 DCHECK(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC); |
445 | 449 |
(...skipping 583 matching lines...)
1029 NormalizedMapCache::cast(cache)->Clear(); | 1033 NormalizedMapCache::cast(cache)->Clear(); |
1030 } | 1034 } |
1031 context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK); | 1035 context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK); |
1032 } | 1036 } |
1033 } | 1037 } |
1034 | 1038 |
1035 | 1039 |
1036 void Heap::UpdateSurvivalStatistics(int start_new_space_size) { | 1040 void Heap::UpdateSurvivalStatistics(int start_new_space_size) { |
1037 if (start_new_space_size == 0) return; | 1041 if (start_new_space_size == 0) return; |
1038 | 1042 |
1039 promotion_rate_ = (static_cast<double>(promoted_objects_size_) / | 1043 promotion_ratio_ = (static_cast<double>(promoted_objects_size_) / |
1040 static_cast<double>(start_new_space_size) * 100); | 1044 static_cast<double>(start_new_space_size) * 100); |
| 1045 |
| 1046 if (previous_semi_space_copied_object_size_ > 0) { |
| 1047 promotion_rate_ = |
| 1048 (static_cast<double>(promoted_objects_size_) / |
| 1049 static_cast<double>(previous_semi_space_copied_object_size_) * 100); |
| 1050 } else { |
| 1051 promotion_rate_ = 0; |
| 1052 } |
1041 | 1053 |
1042 semi_space_copied_rate_ = | 1054 semi_space_copied_rate_ = |
1043 (static_cast<double>(semi_space_copied_object_size_) / | 1055 (static_cast<double>(semi_space_copied_object_size_) / |
1044 static_cast<double>(start_new_space_size) * 100); | 1056 static_cast<double>(start_new_space_size) * 100); |
1045 | 1057 |
1046 double survival_rate = promotion_rate_ + semi_space_copied_rate_; | 1058 double survival_rate = promotion_ratio_ + semi_space_copied_rate_; |
| 1059 tracer()->AddSurvivalRate(survival_rate); |
1047 | 1060 |
1048 if (survival_rate > kYoungSurvivalRateHighThreshold) { | 1061 if (survival_rate > kYoungSurvivalRateHighThreshold) { |
1049 high_survival_rate_period_length_++; | 1062 high_survival_rate_period_length_++; |
1050 } else { | 1063 } else { |
1051 high_survival_rate_period_length_ = 0; | 1064 high_survival_rate_period_length_ = 0; |
1052 } | 1065 } |
1053 } | 1066 } |
1054 | 1067 |
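The hunk above splits the old survival metric in two: promotion_ratio_ compares promoted bytes against the new-space size at the start of the GC, while the new promotion_rate_ compares them against the bytes copied by the previous scavenge (falling back to 0 when there is no previous scavenge). A minimal standalone sketch with purely illustrative numbers, not taken from the patch:

#include <cstdio>

int main() {
  // Illustrative figures only; the real values come from the Heap fields.
  double promoted_objects_size = 512 * 1024;                    // promoted this GC
  double start_new_space_size = 4 * 1024 * 1024;                // new space at GC start
  double previous_semi_space_copied_object_size = 1024 * 1024;  // copied by last scavenge

  // Share of the young generation (at GC start) that got promoted.
  double promotion_ratio = promoted_objects_size / start_new_space_size * 100;

  // Promotion relative to what survived the previous scavenge; 0 when there is
  // nothing to compare against, matching the patch's else branch.
  double promotion_rate =
      previous_semi_space_copied_object_size > 0
          ? promoted_objects_size / previous_semi_space_copied_object_size * 100
          : 0;

  std::printf("promotion_ratio %.1f%%, promotion_rate %.1f%%\n",
              promotion_ratio, promotion_rate);  // prints 12.5%, 50.0%
  return 0;
}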
1055 bool Heap::PerformGarbageCollection( | 1068 bool Heap::PerformGarbageCollection( |
1056 GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags) { | 1069 GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags) { |
(...skipping 37 matching lines...)
1094 if (collector == MARK_COMPACTOR) { | 1107 if (collector == MARK_COMPACTOR) { |
1095 // Perform mark-sweep with optional compaction. | 1108 // Perform mark-sweep with optional compaction. |
1096 MarkCompact(); | 1109 MarkCompact(); |
1097 sweep_generation_++; | 1110 sweep_generation_++; |
1098 // Temporarily set the limit for the case when PostGarbageCollectionProcessing | 1111 // Temporarily set the limit for the case when PostGarbageCollectionProcessing |
1099 // allocates and triggers GC. The real limit is set after | 1112 // allocates and triggers GC. The real limit is set after |
1100 // PostGarbageCollectionProcessing. | 1113 // PostGarbageCollectionProcessing. |
1101 old_generation_allocation_limit_ = | 1114 old_generation_allocation_limit_ = |
1102 OldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(), 0); | 1115 OldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(), 0); |
1103 old_gen_exhausted_ = false; | 1116 old_gen_exhausted_ = false; |
| 1117 old_generation_size_configured_ = true; |
1104 } else { | 1118 } else { |
1105 Scavenge(); | 1119 Scavenge(); |
1106 } | 1120 } |
1107 | 1121 |
1108 UpdateSurvivalStatistics(start_new_space_size); | 1122 UpdateSurvivalStatistics(start_new_space_size); |
| 1123 ConfigureInitialOldGenerationSize(); |
1109 | 1124 |
1110 isolate_->counters()->objs_since_last_young()->Set(0); | 1125 isolate_->counters()->objs_since_last_young()->Set(0); |
1111 | 1126 |
1112 // Callbacks that fire after this point might trigger nested GCs and | 1127 // Callbacks that fire after this point might trigger nested GCs and |
1113 // restart incremental marking, so the assertion can't be moved down. | 1128 // restart incremental marking, so the assertion can't be moved down. |
1114 DCHECK(collector == SCAVENGER || incremental_marking()->IsStopped()); | 1129 DCHECK(collector == SCAVENGER || incremental_marking()->IsStopped()); |
1115 | 1130 |
1116 gc_post_processing_depth_++; | 1131 gc_post_processing_depth_++; |
1117 { | 1132 { |
1118 AllowHeapAllocation allow_allocation; | 1133 AllowHeapAllocation allow_allocation; |
(...skipping 1215 matching lines...)
2334 | 2349 |
2335 void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) { | 2350 void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) { |
2336 SLOW_DCHECK(object->GetIsolate()->heap()->InFromSpace(object)); | 2351 SLOW_DCHECK(object->GetIsolate()->heap()->InFromSpace(object)); |
2337 MapWord first_word = object->map_word(); | 2352 MapWord first_word = object->map_word(); |
2338 SLOW_DCHECK(!first_word.IsForwardingAddress()); | 2353 SLOW_DCHECK(!first_word.IsForwardingAddress()); |
2339 Map* map = first_word.ToMap(); | 2354 Map* map = first_word.ToMap(); |
2340 map->GetHeap()->DoScavengeObject(map, p, object); | 2355 map->GetHeap()->DoScavengeObject(map, p, object); |
2341 } | 2356 } |
2342 | 2357 |
2343 | 2358 |
| 2359 void Heap::ConfigureInitialOldGenerationSize() { |
| 2360 if (!old_generation_size_configured_ && tracer()->SurvivalEventsRecorded()) { |
| 2361 old_generation_allocation_limit_ = |
| 2362 Max(kMinimumOldGenerationAllocationLimit, |
| 2363 static_cast<intptr_t>( |
| 2364 static_cast<double>(initial_old_generation_size_) * |
| 2365 (tracer()->AverageSurvivalRate() / 100))); |
| 2366 } |
| 2367 } |
| 2368 |
| 2369 |
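The new ConfigureInitialOldGenerationSize() above adjusts the old-generation allocation limit once survival data is available: it scales initial_old_generation_size_ by the tracer's average survival rate, but never below kMinimumOldGenerationAllocationLimit, and only while old_generation_size_configured_ is still false. A hedged sketch of that computation; kMinimumLimit is a placeholder stand-in for the real constant, whose value is defined elsewhere in the heap sources:

#include <algorithm>
#include <cstdint>

// Sketch only; kMinimumLimit stands in for kMinimumOldGenerationAllocationLimit.
int64_t InitialOldGenerationLimit(int64_t initial_old_generation_size,
                                  double average_survival_rate_percent) {
  const int64_t kMinimumLimit = 2 * 1024 * 1024;  // assumed placeholder value
  int64_t scaled = static_cast<int64_t>(
      static_cast<double>(initial_old_generation_size) *
      (average_survival_rate_percent / 100));
  return std::max(kMinimumLimit, scaled);
}

For example, a 700 MB initial size with a 30% average survival rate yields roughly 210 MB, which then serves as old_generation_allocation_limit_ until the first mark-compact sets old_generation_size_configured_.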
2344 AllocationResult Heap::AllocatePartialMap(InstanceType instance_type, | 2370 AllocationResult Heap::AllocatePartialMap(InstanceType instance_type, |
2345 int instance_size) { | 2371 int instance_size) { |
2346 Object* result; | 2372 Object* result; |
2347 AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE); | 2373 AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE); |
2348 if (!allocation.To(&result)) return allocation; | 2374 if (!allocation.To(&result)) return allocation; |
2349 | 2375 |
2350 // Map::cast cannot be used due to uninitialized map field. | 2376 // Map::cast cannot be used due to uninitialized map field. |
2351 reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map()); | 2377 reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map()); |
2352 reinterpret_cast<Map*>(result)->set_instance_type(instance_type); | 2378 reinterpret_cast<Map*>(result)->set_instance_type(instance_type); |
2353 reinterpret_cast<Map*>(result)->set_instance_size(instance_size); | 2379 reinterpret_cast<Map*>(result)->set_instance_size(instance_size); |
(...skipping 2792 matching lines...)
5146 if (FLAG_semi_space_growth_factor < 2) { | 5172 if (FLAG_semi_space_growth_factor < 2) { |
5147 FLAG_semi_space_growth_factor = 2; | 5173 FLAG_semi_space_growth_factor = 2; |
5148 } | 5174 } |
5149 | 5175 |
5150 // The old generation is paged and needs at least one page for each space. | 5176 // The old generation is paged and needs at least one page for each space. |
5151 int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1; | 5177 int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1; |
5152 max_old_generation_size_ = | 5178 max_old_generation_size_ = |
5153 Max(static_cast<intptr_t>(paged_space_count * Page::kPageSize), | 5179 Max(static_cast<intptr_t>(paged_space_count * Page::kPageSize), |
5154 max_old_generation_size_); | 5180 max_old_generation_size_); |
5155 | 5181 |
| 5182 if (FLAG_initial_old_space_size > 0) { |
| 5183 initial_old_generation_size_ = FLAG_initial_old_space_size * MB; |
| 5184 } else { |
| 5185 initial_old_generation_size_ = max_old_generation_size_; |
| 5186 } |
| 5187 old_generation_allocation_limit_ = initial_old_generation_size_; |
| 5188 |
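With the ConfigureHeap() change above, a positive FLAG_initial_old_space_size (in MB) pins the initial old-generation size; otherwise it starts at the maximum and the survival-rate heuristic lowers the limit later. A small sketch of that selection, with the flag value passed in as a plain parameter rather than read from the real FLAG_ variable:

#include <cstdint>

int64_t InitialOldGenerationSize(int initial_old_space_size_mb,
                                 int64_t max_old_generation_size) {
  const int64_t kMB = 1024 * 1024;
  // Positive flag: explicit initial size. Otherwise fall back to the maximum
  // and let ConfigureInitialOldGenerationSize() shrink the limit later.
  return initial_old_space_size_mb > 0
             ? static_cast<int64_t>(initial_old_space_size_mb) * kMB
             : max_old_generation_size;
}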
5156 // We rely on being able to allocate new arrays in paged spaces. | 5189 // We rely on being able to allocate new arrays in paged spaces. |
5157 DCHECK(Page::kMaxRegularHeapObjectSize >= | 5190 DCHECK(Page::kMaxRegularHeapObjectSize >= |
5158 (JSArray::kSize + | 5191 (JSArray::kSize + |
5159 FixedArray::SizeFor(JSObject::kInitialMaxFastElementArray) + | 5192 FixedArray::SizeFor(JSObject::kInitialMaxFastElementArray) + |
5160 AllocationMemento::kSize)); | 5193 AllocationMemento::kSize)); |
5161 | 5194 |
5162 code_range_size_ = code_range_size * MB; | 5195 code_range_size_ = code_range_size * MB; |
5163 | 5196 |
5164 configured_ = true; | 5197 configured_ = true; |
5165 return true; | 5198 return true; |
(...skipping 1229 matching lines...)
6395 static_cast<int>(object_sizes_last_time_[index])); | 6428 static_cast<int>(object_sizes_last_time_[index])); |
6396 CODE_AGE_LIST_COMPLETE(ADJUST_LAST_TIME_OBJECT_COUNT) | 6429 CODE_AGE_LIST_COMPLETE(ADJUST_LAST_TIME_OBJECT_COUNT) |
6397 #undef ADJUST_LAST_TIME_OBJECT_COUNT | 6430 #undef ADJUST_LAST_TIME_OBJECT_COUNT |
6398 | 6431 |
6399 MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_)); | 6432 MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_)); |
6400 MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_)); | 6433 MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_)); |
6401 ClearObjectStats(); | 6434 ClearObjectStats(); |
6402 } | 6435 } |
6403 } | 6436 } |
6404 } // namespace v8::internal | 6437 } // namespace v8::internal |