OLD | NEW |
---|---|
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/v8.h" | 5 #include "src/v8.h" |
6 | 6 |
7 #include "src/accessors.h" | 7 #include "src/accessors.h" |
8 #include "src/api.h" | 8 #include "src/api.h" |
9 #include "src/base/bits.h" | 9 #include "src/base/bits.h" |
10 #include "src/base/once.h" | 10 #include "src/base/once.h" |
(...skipping 1200 matching lines...) | |
1211 // We speed up the incremental marker if it is running so that it | 1211 // We speed up the incremental marker if it is running so that it |
1212 // does not fall behind the rate of promotion, which would cause a | 1212 // does not fall behind the rate of promotion, which would cause a |
1213 // constantly growing old space. | 1213 // constantly growing old space. |
1214 incremental_marking()->NotifyOfHighPromotionRate(); | 1214 incremental_marking()->NotifyOfHighPromotionRate(); |
1215 } | 1215 } |
1216 | 1216 |
1217 if (collector == MARK_COMPACTOR) { | 1217 if (collector == MARK_COMPACTOR) { |
1218 // Perform mark-sweep with optional compaction. | 1218 // Perform mark-sweep with optional compaction. |
1219 MarkCompact(); | 1219 MarkCompact(); |
1220 sweep_generation_++; | 1220 sweep_generation_++; |
1221 // Temporarily set the limit for the case when PostGarbageCollectionProcessing | |
1222 // allocates and triggers GC. The real limit is set after | |
1223 // PostGarbageCollectionProcessing. | |
1224 const double kConservativeFactor = 1.5; | |
1225 SetOldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(), | |
1226 kConservativeFactor); | |
1221 old_gen_exhausted_ = false; | 1227 old_gen_exhausted_ = false; |
1222 old_generation_size_configured_ = true; | 1228 old_generation_size_configured_ = true; |
1223 } else { | 1229 } else { |
1224 Scavenge(); | 1230 Scavenge(); |
1225 } | 1231 } |
1226 | 1232 |
1227 // This should be updated before PostGarbageCollectionProcessing, which can | 1233 // This should be updated before PostGarbageCollectionProcessing, which can |
1228 // cause another GC. | 1234 // cause another GC. |
1229 old_generation_size_at_last_gc_ = PromotedSpaceSizeOfObjects(); | 1235 old_generation_size_at_last_gc_ = PromotedSpaceSizeOfObjects(); |
1230 | 1236 |
(...skipping 13 matching lines...) | |
1244 freed_global_handles = | 1250 freed_global_handles = |
1245 isolate_->global_handles()->PostGarbageCollectionProcessing(collector); | 1251 isolate_->global_handles()->PostGarbageCollectionProcessing(collector); |
1246 } | 1252 } |
1247 gc_post_processing_depth_--; | 1253 gc_post_processing_depth_--; |
1248 | 1254 |
1249 isolate_->eternal_handles()->PostGarbageCollectionProcessing(this); | 1255 isolate_->eternal_handles()->PostGarbageCollectionProcessing(this); |
1250 | 1256 |
1251 // Update relocatables. | 1257 // Update relocatables. |
1252 Relocatable::PostGarbageCollectionProcessing(isolate_); | 1258 Relocatable::PostGarbageCollectionProcessing(isolate_); |
1253 | 1259 |
1260 size_t total_committed = 0; | |
1261 int fragmentation_percent = | |
1262 FragmentationOfCompactedSpacesInPercent(&total_committed); | |
Hannes Payer (out of office), 2015/06/03 06:32:57:
Can we have a separate function which returns tota… | |
1263 // Ignore fragmentation of non-compactable heaps and small heaps. | |
1264 const size_t kSmallHeapThreshold = 8 * Page::kPageSize; | |
1265 if (FLAG_never_compact || total_committed <= kSmallHeapThreshold) { | |
1266 fragmentation_percent = 0; | |
1267 } | |
1268 | |
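The body of FragmentationOfCompactedSpacesInPercent is not part of this hunk, so the helper below is only an illustrative sketch of the kind of metric being consumed here: the share of committed memory in the compactable old spaces that is not occupied by live objects, with the committed total reported through an out parameter the way total_committed is filled in above. The name and the (committed, used) pair representation are assumptions made for the sketch.

    #include <cstddef>
    #include <utility>
    #include <vector>

    // Illustrative only: fragmentation as the percentage of committed memory
    // that is not used by live objects, summed over the compacted spaces.
    int FragmentationInPercentSketch(
        const std::vector<std::pair<size_t, size_t>>& spaces,  // (committed, used)
        size_t* total_committed) {
      size_t committed = 0;
      size_t used = 0;
      for (const auto& space : spaces) {
        committed += space.first;
        used += space.second;
      }
      *total_committed = committed;
      if (committed == 0) return 0;
      return static_cast<int>(100 * (committed - used) / committed);
    }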
1254 if (collector == MARK_COMPACTOR) { | 1269 if (collector == MARK_COMPACTOR) { |
1255 // Register the amount of external allocated memory. | 1270 // Register the amount of external allocated memory. |
1256 amount_of_external_allocated_memory_at_last_global_gc_ = | 1271 amount_of_external_allocated_memory_at_last_global_gc_ = |
1257 amount_of_external_allocated_memory_; | 1272 amount_of_external_allocated_memory_; |
1258 SetOldGenerationAllocationLimit( | 1273 double factor = HeapGrowingFactor( |
1259 PromotedSpaceSizeOfObjects(), | 1274 freed_global_handles, |
1260 tracer()->CurrentAllocationThroughputInBytesPerMillisecond()); | 1275 tracer()->CurrentAllocationThroughputInBytesPerMillisecond(), |
1276 fragmentation_percent); | |
1277 SetOldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(), factor); | |
1261 // We finished a marking cycle. We can uncommit the marking deque until | 1278 // We finished a marking cycle. We can uncommit the marking deque until |
1262 // we start marking again. | 1279 // we start marking again. |
1263 mark_compact_collector_.UncommitMarkingDeque(); | 1280 mark_compact_collector_.UncommitMarkingDeque(); |
1264 } | 1281 } |
1265 | 1282 |
1266 { | 1283 { |
1267 GCCallbacksScope scope(this); | 1284 GCCallbacksScope scope(this); |
1268 if (scope.CheckReenter()) { | 1285 if (scope.CheckReenter()) { |
1269 AllowHeapAllocation allow_allocation; | 1286 AllowHeapAllocation allow_allocation; |
1270 GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL); | 1287 GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL); |
1271 VMState<EXTERNAL> state(isolate_); | 1288 VMState<EXTERNAL> state(isolate_); |
1272 HandleScope handle_scope(isolate_); | 1289 HandleScope handle_scope(isolate_); |
1273 CallGCEpilogueCallbacks(gc_type, gc_callback_flags); | 1290 CallGCEpilogueCallbacks(gc_type, gc_callback_flags); |
1274 } | 1291 } |
1275 } | 1292 } |
1276 | 1293 |
1277 #ifdef VERIFY_HEAP | 1294 #ifdef VERIFY_HEAP |
1278 if (FLAG_verify_heap) { | 1295 if (FLAG_verify_heap) { |
1279 VerifyStringTable(this); | 1296 VerifyStringTable(this); |
1280 } | 1297 } |
1281 #endif | 1298 #endif |
1282 | 1299 |
1283 return freed_global_handles > 0; | 1300 return NextGCLikelyToFreeMore(freed_global_handles, fragmentation_percent); |
1284 } | 1301 } |
1285 | 1302 |
1286 | 1303 |
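NextGCLikelyToFreeMore is also defined outside this hunk. Going only by the old return statement (freed_global_handles > 0) and the new arguments, a plausible reading, stated here as an assumption rather than the actual implementation, is that the helper keeps the freed-handles signal and additionally reports true when fragmentation is high enough that a compacting GC would reclaim committed memory:

    // Illustrative sketch only; the threshold name and value are made up.
    bool NextGCLikelyToFreeMoreSketch(int freed_global_handles,
                                      int fragmentation_percent) {
      const int kFragmentationThresholdPercent = 50;  // assumed value
      return freed_global_handles > 0 ||
             fragmentation_percent > kFragmentationThresholdPercent;
    }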
1287 void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) { | 1304 void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) { |
1288 for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) { | 1305 for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) { |
1289 if (gc_type & gc_prologue_callbacks_[i].gc_type) { | 1306 if (gc_type & gc_prologue_callbacks_[i].gc_type) { |
1290 if (!gc_prologue_callbacks_[i].pass_isolate_) { | 1307 if (!gc_prologue_callbacks_[i].pass_isolate_) { |
1291 v8::GCPrologueCallback callback = | 1308 v8::GCPrologueCallback callback = |
1292 reinterpret_cast<v8::GCPrologueCallback>( | 1309 reinterpret_cast<v8::GCPrologueCallback>( |
1293 gc_prologue_callbacks_[i].callback); | 1310 gc_prologue_callbacks_[i].callback); |
(...skipping 4089 matching lines...) | |
5383 CHECK(factor > 1.0); | 5400 CHECK(factor > 1.0); |
5384 CHECK(old_gen_size > 0); | 5401 CHECK(old_gen_size > 0); |
5385 intptr_t limit = static_cast<intptr_t>(old_gen_size * factor); | 5402 intptr_t limit = static_cast<intptr_t>(old_gen_size * factor); |
5386 limit = Max(limit, old_gen_size + kMinimumOldGenerationAllocationLimit); | 5403 limit = Max(limit, old_gen_size + kMinimumOldGenerationAllocationLimit); |
5387 limit += new_space_.Capacity(); | 5404 limit += new_space_.Capacity(); |
5388 intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2; | 5405 intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2; |
5389 return Min(limit, halfway_to_the_max); | 5406 return Min(limit, halfway_to_the_max); |
5390 } | 5407 } |
5391 | 5408 |
5392 | 5409 |
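To make the clamping in CalculateOldGenerationAllocationLimit concrete, here is a standalone sketch with made-up numbers; the real kMinimumOldGenerationAllocationLimit, new-space capacity, and max_old_generation_size_ are configuration-dependent, so the values below are assumptions for illustration only.

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    int64_t CalculateLimitSketch(int64_t old_gen_size, double factor,
                                 int64_t min_growth, int64_t new_space_capacity,
                                 int64_t max_old_generation_size) {
      int64_t limit = static_cast<int64_t>(old_gen_size * factor);
      // Grow by at least the minimum increment.
      limit = std::max(limit, old_gen_size + min_growth);
      // Leave headroom for a full new space being promoted.
      limit += new_space_capacity;
      // Never schedule the next GC beyond halfway to the hard maximum.
      int64_t halfway = (old_gen_size + max_old_generation_size) / 2;
      return std::min(limit, halfway);
    }

    int main() {
      // Assumed values: 64 MB old gen, factor 1.5, 1 MB minimum growth,
      // 16 MB new space, 700 MB max old generation size.
      int64_t limit = CalculateLimitSketch(64LL << 20, 1.5, 1LL << 20,
                                           16LL << 20, 700LL << 20);
      std::printf("limit = %lld MB\n",
                  static_cast<long long>(limit >> 20));  // prints 112 MB
      return 0;
    }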
5393 void Heap::SetOldGenerationAllocationLimit( | 5410 double LinearInterpolation(double x1, double y1, double x2, double y2, |
5394 intptr_t old_gen_size, size_t current_allocation_throughput) { | 5411 double x) { |
5395 // Allocation throughput on Android devices is typically lower than on | 5412 DCHECK(x1 < x2); |
5396 // non-mobile devices. | 5413 if (x <= x1) return y1; |
5397 #if V8_OS_ANDROID | 5414 if (x >= x2) return y2; |
5398 const size_t kHighThroughput = 2500; | 5415 return y1 + (y2 - y1) / (x2 - x1) * (x - x1); |
5399 const size_t kLowThroughput = 250; | 5416 } |
5400 #else | 5417 |
5401 const size_t kHighThroughput = 10000; | 5418 |
5402 const size_t kLowThroughput = 1000; | 5419 double Heap::HeapGrowingFactor(int freed_handles, size_t allocation_throughput, |
5403 #endif | 5420 int fragmentation_percent) { |
5404 const double min_scaling_factor = 1.1; | 5421 const int kLowHandles = 100; |
5405 const double max_scaling_factor = 1.5; | 5422 const int kHighHandles = 1000; |
5406 double max_factor = 4; | 5423 |
5407 const double idle_max_factor = 1.5; | 5424 const size_t kPointerMultiplier = i::kPointerSize / 4; |
5408 // We set the old generation growing factor to 2 to grow the heap slower on | 5425 // We use kHighThroughputFactor if allocation_throughput >= kHighThroughput. |
5409 // memory-constrained devices. | 5426 // Otherwise, we linearly interpolate between (kLowThroughput, kMinFactor) |
5410 if (max_old_generation_size_ <= kMaxOldSpaceSizeMediumMemoryDevice) { | 5427 // and (kMediumThroughput, kMaxFactor). |
5411 max_factor = 2; | 5428 const size_t kHighThroughput = 10000 * kPointerMultiplier; |
5429 const size_t kMediumThroughput = 5000 * kPointerMultiplier; | |
5430 const size_t kLowThroughput = 500 * kPointerMultiplier; | |
5431 | |
5432 const double kHighThroughputFactor = 4.0; | |
5433 const double kHighThroughputFactorForMemoryConstrained = 2.0; | |
5434 const double kMaxFactor = 2.0; | |
5435 const double kMinFactor = 1.1; | |
5436 | |
5437 if (allocation_throughput == 0) { | |
5438 // If we have no allocation throughput estimate, we conservatively assume | |
5439 // that it is medium. | |
5440 allocation_throughput = kMediumThroughput; | |
5412 } | 5441 } |
5442 double factor1 = LinearInterpolation(kLowHandles, kMaxFactor, kHighHandles, | |
5443 kMinFactor, freed_handles); | |
5444 double factor2 = | |
5445 LinearInterpolation(kLowThroughput, kMinFactor, kMediumThroughput, | |
5446 kMaxFactor, allocation_throughput); | |
5447 double factor3 = LinearInterpolation(kLowFragmentationPercent, kMaxFactor, | |
5448 kHighFragmentationPercent, kMinFactor, | |
5449 fragmentation_percent); | |
5413 | 5450 |
Hannes Payer (out of office), 2015/06/03 06:32:57:
This function needs more comments about the magic… | |
5414 double factor; | 5451 double factor = (factor1 + factor2 + factor3) / 3; |
5415 double idle_factor; | 5452 if (allocation_throughput >= kHighThroughput && freed_handles < kLowHandles) { |
5416 if (current_allocation_throughput == 0 || | 5453 // We need high throughput, schedule GC late. |
5417 current_allocation_throughput >= kHighThroughput) { | 5454 // We set the old generation growing factor to 2 to grow the heap slower on |
5418 factor = max_factor; | 5455 // memory-constrained devices. |
5419 } else if (current_allocation_throughput <= kLowThroughput) { | 5456 if (max_old_generation_size_ <= kMaxOldSpaceSizeMediumMemoryDevice) { |
5420 factor = min_scaling_factor; | 5457 factor = kHighThroughputFactorForMemoryConstrained; |
5421 } else { | 5458 } else { |
5422 // Compute factor using linear interpolation between points | 5459 factor = kHighThroughputFactor; |
5423 // (kHighThroughput, max_scaling_factor) and (kLowThroughput, min_factor). | 5460 } |
5424 factor = min_scaling_factor + | |
5425 (current_allocation_throughput - kLowThroughput) * | |
5426 (max_scaling_factor - min_scaling_factor) / | |
5427 (kHighThroughput - kLowThroughput); | |
5428 } | 5461 } |
5429 | 5462 |
5430 if (FLAG_stress_compaction || | 5463 if (FLAG_stress_compaction || |
5431 mark_compact_collector()->reduce_memory_footprint_) { | 5464 mark_compact_collector()->reduce_memory_footprint_) { |
5432 factor = min_scaling_factor; | 5465 factor = kMinFactor; |
5433 } | 5466 } |
5434 | 5467 |
5468 if (FLAG_trace_gc_verbose) { | |
5469 PrintIsolate(isolate_, | |
5470 "Freed handles: %d (factor %.1f)," | |
5471 "allocation throughput: %u (factor %.1f)," | |
5472 "fragmentation: %d (factor %.1f), combined factor: %.1f\n", | |
5473 freed_handles, factor1, allocation_throughput, factor2, | |
5474 fragmentation_percent, factor3, factor); | |
5475 } | |
5476 return factor; | |
5477 } | |
5478 | |
5479 | |
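HeapGrowingFactor averages three independently interpolated signals (freed handles, allocation throughput, fragmentation) before the high-throughput/low-handles override. The short program below reproduces that arithmetic for one set of example inputs; the fragmentation breakpoints kLowFragmentationPercent and kHighFragmentationPercent are defined elsewhere in this CL, so the 10 and 70 used here are placeholders, and a 64-bit build (kPointerMultiplier = 2) is assumed.

    #include <cstdio>

    // Same interpolation shape as LinearInterpolation above.
    double Lerp(double x1, double y1, double x2, double y2, double x) {
      if (x <= x1) return y1;
      if (x >= x2) return y2;
      return y1 + (y2 - y1) / (x2 - x1) * (x - x1);
    }

    int main() {
      const double kMinFactor = 1.1, kMaxFactor = 2.0;
      const double kLowHandles = 100, kHighHandles = 1000;
      // Assumed 64-bit build: kPointerMultiplier = 8 / 4 = 2.
      const double kLowThroughput = 500 * 2, kMediumThroughput = 5000 * 2;
      // Placeholder breakpoints; the real constants live outside this hunk.
      const double kLowFragmentationPercent = 10, kHighFragmentationPercent = 70;

      // Each example input sits at the midpoint of its range, so every
      // interpolated factor is (2.0 + 1.1) / 2 = 1.55, as is their average.
      double factor1 = Lerp(kLowHandles, kMaxFactor, kHighHandles, kMinFactor,
                            /*freed_handles=*/550);
      double factor2 = Lerp(kLowThroughput, kMinFactor, kMediumThroughput,
                            kMaxFactor, /*allocation_throughput=*/5500);
      double factor3 = Lerp(kLowFragmentationPercent, kMaxFactor,
                            kHighFragmentationPercent, kMinFactor,
                            /*fragmentation_percent=*/40);
      std::printf("%.2f %.2f %.2f -> %.2f\n", factor1, factor2, factor3,
                  (factor1 + factor2 + factor3) / 3);
      return 0;
    }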
5480 void Heap::SetOldGenerationAllocationLimit(intptr_t old_gen_size, | |
5481 double factor) { | |
5482 const double idle_max_factor = 1.5; | |
5483 | |
5435 // TODO(hpayer): Investigate if idle_old_generation_allocation_limit_ is still | 5484 // TODO(hpayer): Investigate if idle_old_generation_allocation_limit_ is still |
5436 // needed after taking the allocation rate for the old generation limit into | 5485 // needed after taking the allocation rate for the old generation limit into |
5437 // account. | 5486 // account. |
5438 idle_factor = Min(factor, idle_max_factor); | 5487 const double idle_factor = Min(factor, idle_max_factor); |
5439 | 5488 |
5440 old_generation_allocation_limit_ = | 5489 old_generation_allocation_limit_ = |
5441 CalculateOldGenerationAllocationLimit(factor, old_gen_size); | 5490 CalculateOldGenerationAllocationLimit(factor, old_gen_size); |
5442 idle_old_generation_allocation_limit_ = | 5491 idle_old_generation_allocation_limit_ = |
5443 CalculateOldGenerationAllocationLimit(idle_factor, old_gen_size); | 5492 CalculateOldGenerationAllocationLimit(idle_factor, old_gen_size); |
5444 | 5493 |
5445 if (FLAG_trace_gc_verbose) { | 5494 if (FLAG_trace_gc_verbose) { |
5446 PrintIsolate( | 5495 PrintIsolate( |
5447 isolate_, | 5496 isolate_, |
5448 "Grow: old size: %" V8_PTR_PREFIX "d KB, new limit: %" V8_PTR_PREFIX | 5497 "Grow: old size: %" V8_PTR_PREFIX "d KB, new limit: %" V8_PTR_PREFIX |
(...skipping 1216 matching lines...) | |
6665 *object_type = "CODE_TYPE"; \ | 6714 *object_type = "CODE_TYPE"; \ |
6666 *object_sub_type = "CODE_AGE/" #name; \ | 6715 *object_sub_type = "CODE_AGE/" #name; \ |
6667 return true; | 6716 return true; |
6668 CODE_AGE_LIST_COMPLETE(COMPARE_AND_RETURN_NAME) | 6717 CODE_AGE_LIST_COMPLETE(COMPARE_AND_RETURN_NAME) |
6669 #undef COMPARE_AND_RETURN_NAME | 6718 #undef COMPARE_AND_RETURN_NAME |
6670 } | 6719 } |
6671 return false; | 6720 return false; |
6672 } | 6721 } |
6673 } | 6722 } |
6674 } // namespace v8::internal | 6723 } // namespace v8::internal |