Index: src/heap/heap.cc
diff --git a/src/heap/heap.cc b/src/heap/heap.cc
index 2ac5146c46d0cf06c6f00f2bbea878a0015db66b..09c373c8f74b6896a18dc9b9e4967bb5ee9bc5b3 100644
--- a/src/heap/heap.cc
+++ b/src/heap/heap.cc
@@ -66,7 +66,7 @@ Heap::Heap()
       max_semi_space_size_(8 * (kPointerSize / 4) * MB),
       initial_semispace_size_(Page::kPageSize),
       target_semispace_size_(Page::kPageSize),
-      max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
+      max_old_generation_size_(kDefaultMaxOldGenSize),
       initial_old_generation_size_(max_old_generation_size_ /
                                    kInitalOldGenerationLimitFactor),
       old_generation_size_configured_(false),
@@ -103,6 +103,7 @@ Heap::Heap()
       allocation_timeout_(0),
 #endif  // DEBUG
       old_generation_allocation_limit_(initial_old_generation_size_),
+      old_generation_committed_memory_limit_(kDefaultMaxOldGenSize >> 1),
       old_gen_exhausted_(false),
       inline_allocation_disabled_(false),
       store_buffer_rebuilder_(store_buffer()),
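The constant and field introduced above are declared in heap.h, which this excerpt does not include. A minimal sketch of what that companion change presumably looks like, assuming kDefaultMaxOldGenSize simply names the former hard-coded default:

```cpp
#include <cstdint>

// Sketch of the assumed heap.h companion change (not part of this excerpt).
// kDefaultMaxOldGenSize is assumed to name the former hard-coded default,
// 700ul * (kPointerSize / 4) * MB, i.e. 1400 MB on 64-bit targets.
namespace sketch {
const intptr_t MB = 1024 * 1024;
const intptr_t kPointerSize = sizeof(void*);
const intptr_t kDefaultMaxOldGenSize = 700l * (kPointerSize / 4) * MB;
// New Heap member; the constructor initializes it to half the default max:
//   old_generation_committed_memory_limit_(kDefaultMaxOldGenSize >> 1)
}  // namespace sketch
```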
@@ -860,7 +861,7 @@ bool Heap::CollectGarbage(GarbageCollector collector, const char* gc_reason,
   }
   if (collector == MARK_COMPACTOR &&
-      !mark_compact_collector()->abort_incremental_marking() &&
+      !mark_compact_collector()->incremental_marking_abort_requested() &&
       !incremental_marking()->IsStopped() &&
       !incremental_marking()->should_hurry() &&
       FLAG_incremental_marking_steps) {
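The rename from abort_incremental_marking() to incremental_marking_abort_requested() happens in mark-compact.h, outside this excerpt. A hedged sketch, assuming only the accessor is renamed and the backing flag keeps its old name, so the new name reads as a query about requested state rather than an imperative:

```cpp
// Sketch of the assumed MarkCompactCollector accessor rename (mark-compact.h
// is not part of this excerpt; the field and setter names are assumptions).
class MarkCompactCollectorSketch {
 public:
  void AbortIncrementalMarking() { abort_incremental_marking_ = true; }
  bool incremental_marking_abort_requested() const {
    return abort_incremental_marking_;
  }

 private:
  bool abort_incremental_marking_ = false;
};
```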
@@ -903,8 +904,9 @@ bool Heap::CollectGarbage(GarbageCollector collector, const char* gc_reason,
   // Start incremental marking for the next cycle. The heap snapshot
   // generator needs incremental marking to stay off after it aborted.
-  if (!mark_compact_collector()->abort_incremental_marking() &&
-      WorthActivatingIncrementalMarking()) {
+  if (!mark_compact_collector()->incremental_marking_abort_requested() &&
+      incremental_marking()->IsStopped() &&
+      incremental_marking()->ShouldActivate()) {
     incremental_marking()->Start();
   }
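The IsStopped() check that used to live inside Heap::WorthActivatingIncrementalMarking() (deleted further down) now sits at the call site; the remaining two checks presumably move into the new IncrementalMarking::ShouldActivate(). A sketch under that assumption:

```cpp
// Assumed shape of the new predicate (its definition is not in this excerpt):
// the two checks from the deleted Heap::WorthActivatingIncrementalMarking()
// that the call site above does not already perform.
bool IncrementalMarking::ShouldActivate() {
  return WorthActivating() && heap_->NextGCIsLikelyToBeFull();
}
```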
@@ -1161,8 +1163,7 @@ bool Heap::PerformGarbageCollection(
     // Temporarily set the limit for the case when
     // PostGarbageCollectionProcessing allocates and triggers GC. The real
     // limit is set after PostGarbageCollectionProcessing.
-    old_generation_allocation_limit_ =
-        OldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(), 0);
+    SetOldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(), 0, false);
     old_gen_exhausted_ = false;
     old_generation_size_configured_ = true;
   } else {
@@ -1196,8 +1197,8 @@ bool Heap::PerformGarbageCollection(
   // Register the amount of external allocated memory.
   amount_of_external_allocated_memory_at_last_global_gc_ =
       amount_of_external_allocated_memory_;
-  old_generation_allocation_limit_ = OldGenerationAllocationLimit(
-      PromotedSpaceSizeOfObjects(), freed_global_handles);
+  SetOldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(),
+                                  freed_global_handles, true);
   // We finished a marking cycle. We can uncommit the marking deque until
   // we start marking again.
   mark_compact_collector_.UncommitMarkingDeque();
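Both call sites now go through a setter instead of assigning a returned value. The declaration below is a sketch of the assumed heap.h signature; the boolean marks whether weak callbacks have run, so the tracing at the end of the new function fires once per full GC rather than for the provisional limit set above:

```cpp
// Sketch of the assumed heap.h declaration (the header hunk is not included).
// weak_callbacks_completed is false for the provisional limit set before
// PostGarbageCollectionProcessing and true for the final limit, which gates
// the FLAG_trace_gc "Next GC" line printed at the end of the new function.
void SetOldGenerationAllocationLimit(intptr_t old_gen_size,
                                     int freed_global_handles,
                                     bool weak_callbacks_completed);
```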
@@ -4574,12 +4575,6 @@ bool Heap::TryFinalizeIdleIncrementalMarking(
 }
-bool Heap::WorthActivatingIncrementalMarking() {
-  return incremental_marking()->IsStopped() &&
-         incremental_marking()->WorthActivating() && NextGCIsLikelyToBeFull();
-}
-
-
 static double MonotonicallyIncreasingTimeInMs() {
   return V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() *
          static_cast<double>(base::Time::kMillisecondsPerSecond);
@@ -5231,8 +5226,9 @@ int64_t Heap::PromotedExternalMemorySize() {
 }
-intptr_t Heap::OldGenerationAllocationLimit(intptr_t old_gen_size,
-                                            int freed_global_handles) {
+void Heap::SetOldGenerationAllocationLimit(intptr_t old_gen_size,
+                                           int freed_global_handles,
+                                           bool weak_callbacks_completed) {
   const int kMaxHandles = 1000;
   const int kMinHandles = 100;
   double min_factor = 1.1;
@@ -5266,9 +5262,30 @@ intptr_t Heap::OldGenerationAllocationLimit(intptr_t old_gen_size,
   intptr_t limit = static_cast<intptr_t>(old_gen_size * factor);
   limit = Max(limit, kMinimumOldGenerationAllocationLimit);
-  limit += new_space_.Capacity();
-  intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
-  return Min(limit, halfway_to_the_max);
+
+  old_generation_allocation_limit_ = limit + new_space_.Capacity();
+
+  // The committed memory limit is halfway from the current live object count
+  // to the max size. This means that if half of our allowed extra memory is
+  // currently taken by fragmentation, we will immediately start another
+  // incremental GC. If there is no fragmentation, we will start incremental
+  // GC when we have committed half the allowed extra memory. This limit will
+  // be compared against the committed memory, i.e., including fragmentation.
+  intptr_t commit_limit_basis = old_gen_size;
+  if (FLAG_never_compact || !FLAG_compact_code_space) {
+    // No point in provoking GCs to get rid of fragmentation if we can't
+    // actually get rid of fragmentation. In this case set the limit higher.
+    commit_limit_basis = CommittedOldGenerationMemory();
+  }
+  old_generation_committed_memory_limit_ =
+      commit_limit_basis / 2 + max_old_generation_size_ / 2;
+
+  if (weak_callbacks_completed && FLAG_trace_gc) {
+    PrintPID("%8.0f ms: ", isolate()->time_millis_since_init());
+    PrintF("Next GC at %.1f (%.1f) -> %.1f (%.1f)\n", old_gen_size * 1.0 / MB,
+           CommittedOldGenerationMemory() * 1.0 / MB, limit * 1.0 / MB,
+           old_generation_committed_memory_limit_ * 1.0 / MB);
+  }
 }
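A worked example of the halfway computation: with 100 MB of live objects and a 700 MB maximum old generation, the next incremental GC is provoked once committed old-generation memory reaches 400 MB. A standalone sketch with illustrative numbers (not taken from this patch):

```cpp
#include <cstdint>
#include <cstdio>

// Standalone illustration of the halfway computation above; the sizes are
// made up for the example.
int main() {
  const int64_t MB = 1024 * 1024;
  const int64_t old_gen_size = 100 * MB;             // live objects after GC
  const int64_t max_old_generation_size = 700 * MB;  // configured maximum
  // Halfway from the live object count to the maximum old generation size.
  const int64_t committed_limit =
      old_gen_size / 2 + max_old_generation_size / 2;
  std::printf("next incremental GC at %lld MB committed\n",
              static_cast<long long>(committed_limit / MB));  // prints 400
  return 0;
}
```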