Chromium Code Reviews

Unified Diff: src/heap/heap.cc

Issue 813733002: Revert of "Shrink initial old generation size based on new space survival rate." (Closed)
Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Created 6 years ago
@@ -1,10 +1,10 @@
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "src/v8.h"

 #include "src/accessors.h"
 #include "src/api.h"
 #include "src/base/bits.h"
 #include "src/base/once.h"
@@ -56,22 +56,20 @@
       amount_of_external_allocated_memory_at_last_global_gc_(0),
       isolate_(NULL),
       code_range_size_(0),
       // semispace_size_ should be a power of 2 and old_generation_size_ should
       // be a multiple of Page::kPageSize.
       reserved_semispace_size_(8 * (kPointerSize / 4) * MB),
       max_semi_space_size_(8 * (kPointerSize / 4) * MB),
       initial_semispace_size_(Page::kPageSize),
       target_semispace_size_(Page::kPageSize),
       max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
-      initial_old_generation_size_(max_old_generation_size_),
-      old_generation_size_configured_(false),
       max_executable_size_(256ul * (kPointerSize / 4) * MB),
       // Variables set based on semispace_size_ and old_generation_size_ in
       // ConfigureHeap.
       // Will be 4 * reserved_semispace_size_ to ensure that young
       // generation can be aligned to its size.
       maximum_committed_(0),
       survived_since_last_expansion_(0),
       survived_last_scavenge_(0),
       sweep_generation_(0),
       always_allocate_scope_depth_(0),
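
Note on the defaults above: the expressions 8 * (kPointerSize / 4) * MB and 700ul * (kPointerSize / 4) * MB scale the budgets with pointer width, giving 8 MB semispaces and a 700 MB old-generation cap on 32-bit targets (kPointerSize == 4) and double that on 64-bit. A standalone sketch of the arithmetic, with kPointerSize and MB redefined locally so it compiles outside V8:

#include <cstddef>
#include <cstdio>
#include <initializer_list>

int main() {
  const std::size_t MB = 1024 * 1024;  // mirrors V8's MB constant
  for (std::size_t kPointerSize : {4, 8}) {  // 32-bit and 64-bit builds
    std::size_t max_semi_space = 8 * (kPointerSize / 4) * MB;
    std::size_t max_old_generation = 700 * (kPointerSize / 4) * MB;
    std::printf("%zu-bit: semispace %zu MB, old generation %zu MB\n",
                kPointerSize * 8, max_semi_space / MB, max_old_generation / MB);
  }
  return 0;
}

This prints 8 MB / 700 MB for the 32-bit case and 16 MB / 1400 MB for the 64-bit case.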
@@ -92,33 +90,32 @@
       allocations_count_(0),
       raw_allocations_hash_(0),
       dump_allocations_hash_countdown_(FLAG_dump_allocations_digest_at_alloc),
       ms_count_(0),
       gc_count_(0),
       remembered_unmapped_pages_index_(0),
       unflattened_strings_length_(0),
 #ifdef DEBUG
       allocation_timeout_(0),
 #endif  // DEBUG
-      old_generation_allocation_limit_(initial_old_generation_size_),
+      old_generation_allocation_limit_(kMinimumOldGenerationAllocationLimit),
       old_gen_exhausted_(false),
       inline_allocation_disabled_(false),
       store_buffer_rebuilder_(store_buffer()),
       hidden_string_(NULL),
       gc_safe_size_of_old_object_(NULL),
       total_regexp_code_generated_(0),
       tracer_(this),
       high_survival_rate_period_length_(0),
       promoted_objects_size_(0),
-      promotion_ratio_(0),
+      promotion_rate_(0),
       semi_space_copied_object_size_(0),
-      previous_semi_space_copied_object_size_(0),
       semi_space_copied_rate_(0),
       nodes_died_in_new_space_(0),
       nodes_copied_in_new_space_(0),
       nodes_promoted_(0),
       maximum_size_scavenges_(0),
       max_gc_pause_(0.0),
       total_gc_time_ms_(0.0),
       max_alive_after_gc_(0),
       min_in_mutator_(kMaxInt),
       marking_time_(0.0),
@@ -429,21 +426,20 @@

 #ifdef VERIFY_HEAP
     if (FLAG_verify_heap) {
       Verify();
     }
 #endif
   }

   // Reset GC statistics.
   promoted_objects_size_ = 0;
-  previous_semi_space_copied_object_size_ = semi_space_copied_object_size_;
   semi_space_copied_object_size_ = 0;
   nodes_died_in_new_space_ = 0;
   nodes_copied_in_new_space_ = 0;
   nodes_promoted_ = 0;

   UpdateMaximumCommitted();

 #ifdef DEBUG
   DCHECK(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);

@@ -1033,37 +1029,28 @@
       NormalizedMapCache::cast(cache)->Clear();
     }
     context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
   }
 }


 void Heap::UpdateSurvivalStatistics(int start_new_space_size) {
   if (start_new_space_size == 0) return;

-  promotion_ratio_ = (static_cast<double>(promoted_objects_size_) /
+  promotion_rate_ = (static_cast<double>(promoted_objects_size_) /
                      static_cast<double>(start_new_space_size) * 100);
-
-  if (previous_semi_space_copied_object_size_ > 0) {
-    promotion_rate_ =
-        (static_cast<double>(promoted_objects_size_) /
-         static_cast<double>(previous_semi_space_copied_object_size_) * 100);
-  } else {
-    promotion_rate_ = 0;
-  }

   semi_space_copied_rate_ =
       (static_cast<double>(semi_space_copied_object_size_) /
        static_cast<double>(start_new_space_size) * 100);

-  double survival_rate = promotion_ratio_ + semi_space_copied_rate_;
-  tracer()->AddSurvivalRate(survival_rate);
+  double survival_rate = promotion_rate_ + semi_space_copied_rate_;

   if (survival_rate > kYoungSurvivalRateHighThreshold) {
     high_survival_rate_period_length_++;
   } else {
     high_survival_rate_period_length_ = 0;
   }
 }


 bool Heap::PerformGarbageCollection(
     GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags) {
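
In the patch being reverted (the removed lines), promotion_ratio_ divided promoted bytes by the new-space size at GC start, while a separate promotion_rate_ divided them by the bytes copied during the previous scavenge. The restored code keeps a single promotion_rate_ against the start size. A minimal sketch of the restored accounting, using invented byte counts:

#include <cstdio>

int main() {
  // Invented sample values, in bytes.
  double promoted_objects_size = 512 * 1024;    // promoted into old space
  double semi_space_copied_size = 1536 * 1024;  // survived within new space
  double start_new_space_size = 4096 * 1024;    // new-space size at GC start

  double promotion_rate = promoted_objects_size / start_new_space_size * 100;
  double semi_space_copied_rate =
      semi_space_copied_size / start_new_space_size * 100;
  // Survival rate is the sum, here 12.5% + 37.5% = 50%, which is then
  // compared against kYoungSurvivalRateHighThreshold.
  double survival_rate = promotion_rate + semi_space_copied_rate;

  std::printf("promotion %.1f%%, copied %.1f%%, survival %.1f%%\n",
              promotion_rate, semi_space_copied_rate, survival_rate);
  return 0;
}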
@@ -1107,27 +1094,25 @@
   if (collector == MARK_COMPACTOR) {
     // Perform mark-sweep with optional compaction.
     MarkCompact();
     sweep_generation_++;
     // Temporarily set the limit for case when PostGarbageCollectionProcessing
     // allocates and triggers GC. The real limit is set at after
     // PostGarbageCollectionProcessing.
     old_generation_allocation_limit_ =
         OldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(), 0);
     old_gen_exhausted_ = false;
-    old_generation_size_configured_ = true;
   } else {
     Scavenge();
   }

   UpdateSurvivalStatistics(start_new_space_size);
-  ConfigureInitialOldGenerationSize();

   isolate_->counters()->objs_since_last_young()->Set(0);

   // Callbacks that fire after this point might trigger nested GCs and
   // restart incremental marking, the assertion can't be moved down.
   DCHECK(collector == SCAVENGER || incremental_marking()->IsStopped());

   gc_post_processing_depth_++;
   {
     AllowHeapAllocation allow_allocation;
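
The MARK_COMPACTOR branch recomputes old_generation_allocation_limit_ eagerly so that any allocation inside PostGarbageCollectionProcessing does not immediately trip another GC; the real limit is recomputed afterwards. A hypothetical sketch of a growth-factor limit of the kind OldGenerationAllocationLimit computes; kGrowthFactor and kMinLimit are assumptions for illustration, not V8's actual values, which depend on heap size and flags:

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for OldGenerationAllocationLimit(): grow the limit
// by a fixed factor over the live size, never below a minimum.
int64_t AllocationLimit(int64_t promoted_space_size) {
  const double kGrowthFactor = 1.5;           // assumed factor
  const int64_t kMinLimit = 2 * 1024 * 1024;  // assumed floor
  return std::max(kMinLimit,
                  static_cast<int64_t>(promoted_space_size * kGrowthFactor));
}

int main() {
  const int64_t MB = 1024 * 1024;
  std::printf("limit after GC: %lld MB\n",
              static_cast<long long>(AllocationLimit(64 * MB) / MB));
  return 0;
}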
@@ -2349,31 +2334,20 @@

 void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
   SLOW_DCHECK(object->GetIsolate()->heap()->InFromSpace(object));
   MapWord first_word = object->map_word();
   SLOW_DCHECK(!first_word.IsForwardingAddress());
   Map* map = first_word.ToMap();
   map->GetHeap()->DoScavengeObject(map, p, object);
 }


-void Heap::ConfigureInitialOldGenerationSize() {
-  if (!old_generation_size_configured_ && tracer()->SurvivalEventsRecorded()) {
-    old_generation_allocation_limit_ =
-        Max(kMinimumOldGenerationAllocationLimit,
-            static_cast<intptr_t>(
-                static_cast<double>(initial_old_generation_size_) *
-                (tracer()->AverageSurvivalRate() / 100)));
-  }
-}
-
-
 AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
                                           int instance_size) {
   Object* result;
   AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
   if (!allocation.To(&result)) return allocation;

   // Map::cast cannot be used due to uninitialized map field.
   reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
   reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
   reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
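
The deleted Heap::ConfigureInitialOldGenerationSize above is the core of the reverted change: once the tracer had recorded survival events, it shrank the allocation limit to the initial old-generation size scaled by the average survival rate, clamped below by kMinimumOldGenerationAllocationLimit. A standalone re-derivation with invented numbers; the real minimum is defined in heap.h rather than this diff, so the 8 MB floor below is a placeholder:

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Scale the initial old-generation size by the average survival rate
// (a percentage), never dropping below the minimum allocation limit.
int64_t ShrunkLimit(int64_t initial_old_generation_size,
                    double average_survival_rate) {
  const int64_t kMinimumLimit = 8 * 1024 * 1024;  // placeholder minimum
  return std::max(kMinimumLimit,
                  static_cast<int64_t>(initial_old_generation_size *
                                       (average_survival_rate / 100)));
}

int main() {
  const int64_t MB = 1024 * 1024;
  // A 700 MB initial size with 25% average survival shrinks to 175 MB.
  std::printf("%lld MB\n",
              static_cast<long long>(ShrunkLimit(700 * MB, 25.0) / MB));
  return 0;
}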
@@ -5172,27 +5146,20 @@
   if (FLAG_semi_space_growth_factor < 2) {
     FLAG_semi_space_growth_factor = 2;
   }

   // The old generation is paged and needs at least one page for each space.
   int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
   max_old_generation_size_ =
       Max(static_cast<intptr_t>(paged_space_count * Page::kPageSize),
           max_old_generation_size_);

-  if (FLAG_initial_old_space_size > 0) {
-    initial_old_generation_size_ = FLAG_initial_old_space_size * MB;
-  } else {
-    initial_old_generation_size_ = max_old_generation_size_;
-  }
-  old_generation_allocation_limit_ = initial_old_generation_size_;
-
   // We rely on being able to allocate new arrays in paged spaces.
   DCHECK(Page::kMaxRegularHeapObjectSize >=
          (JSArray::kSize +
           FixedArray::SizeFor(JSObject::kInitialMaxFastElementArray) +
           AllocationMemento::kSize));

   code_range_size_ = code_range_size * MB;

   configured_ = true;
   return true;
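
The deleted ConfigureHeap block let an explicit --initial_old_space_size flag, given in MB, seed initial_old_generation_size_, falling back to the configured maximum; the allocation limit then started at that initial size. A sketch of that fallback logic, with the flag value simulated as a local variable:

#include <cstdint>
#include <cstdio>

int main() {
  const int64_t MB = 1024 * 1024;
  int64_t max_old_generation_size = 700 * MB;
  int FLAG_initial_old_space_size = 0;  // 0 stands for "flag not set"

  // Flag value is given in MB; otherwise fall back to the maximum, which
  // matches the reverted-to behaviour of starting with the full budget.
  int64_t initial_old_generation_size =
      FLAG_initial_old_space_size > 0
          ? static_cast<int64_t>(FLAG_initial_old_space_size) * MB
          : max_old_generation_size;
  int64_t old_generation_allocation_limit = initial_old_generation_size;

  std::printf("initial limit: %lld MB\n",
              static_cast<long long>(old_generation_allocation_limit / MB));
  return 0;
}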
@@ -6428,10 +6395,10 @@
                      static_cast<int>(object_sizes_last_time_[index]));
   CODE_AGE_LIST_COMPLETE(ADJUST_LAST_TIME_OBJECT_COUNT)
 #undef ADJUST_LAST_TIME_OBJECT_COUNT

   MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
   MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
   ClearObjectStats();
 }
 }
 }  // namespace v8::internal