Index: src/heap/heap.cc
diff --git a/src/heap/heap.cc b/src/heap/heap.cc
index c4a368ea5b87cf564a5263c56dc05de002966d93..5dd533a96f279b74285de7418ff8f246983d04cd 100644
--- a/src/heap/heap.cc
+++ b/src/heap/heap.cc
@@ -66,7 +66,7 @@ Heap::Heap()
       max_semi_space_size_(8 * (kPointerSize / 4) * MB),
       initial_semispace_size_(Page::kPageSize),
       target_semispace_size_(Page::kPageSize),
-      max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
+      max_old_generation_size_(kDefaultMaxOldGenSize),
       initial_old_generation_size_(max_old_generation_size_ /
                                    kInitalOldGenerationLimitFactor),
       old_generation_size_configured_(false),
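The constant kDefaultMaxOldGenSize is not defined in any hunk of this file; presumably the rest of the patch adds it to heap.h with the same value the replaced literal spelled out, i.e. 700 MB on 32-bit and 1400 MB on 64-bit targets. A minimal standalone sketch of that arithmetic (assumption only, not part of the patch):

  #include <cstdint>
  #include <cstdio>

  int main() {
    const int64_t MB = 1024 * 1024;
    const int kPointerSize = sizeof(void*);  // 4 on 32-bit, 8 on 64-bit
    // Same expression as the literal this patch replaces.
    const int64_t kDefaultMaxOldGenSize = 700 * (kPointerSize / 4) * MB;
    std::printf("default max old generation: %lld MB\n",
                static_cast<long long>(kDefaultMaxOldGenSize / MB));
  }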
@@ -103,6 +103,7 @@ Heap::Heap()
       allocation_timeout_(0),
 #endif  // DEBUG
       old_generation_allocation_limit_(initial_old_generation_size_),
+      old_generation_committed_memory_limit_(kDefaultMaxOldGenSize >> 1),
       old_gen_exhausted_(false),
       inline_allocation_disabled_(false),
       store_buffer_rebuilder_(store_buffer()),
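The new old_generation_committed_memory_limit_ field starts out at kDefaultMaxOldGenSize >> 1. That matches the formula introduced at the end of this patch (commit_limit_basis / 2 + max_old_generation_size_ / 2) evaluated before anything has been promoted: with a live size of zero the limit is simply half the maximum, i.e. 700 MB on a 64-bit build, assuming the 1400 MB default.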
@@ -861,7 +862,7 @@ bool Heap::CollectGarbage(GarbageCollector collector, const char* gc_reason,
   }

   if (collector == MARK_COMPACTOR &&
-      !mark_compact_collector()->abort_incremental_marking() &&
+      !mark_compact_collector()->incremental_marking_abort_requested() &&
       !incremental_marking()->IsStopped() &&
       !incremental_marking()->should_hurry() &&
       FLAG_incremental_marking_steps) {
@@ -904,8 +905,9 @@ bool Heap::CollectGarbage(GarbageCollector collector, const char* gc_reason,
   // Start incremental marking for the next cycle. The heap snapshot
   // generator needs incremental marking to stay off after it aborted.
-  if (!mark_compact_collector()->abort_incremental_marking() &&
-      WorthActivatingIncrementalMarking()) {
+  if (!mark_compact_collector()->incremental_marking_abort_requested() &&
+      incremental_marking()->IsStopped() &&
+      incremental_marking()->ShouldActivate()) {
     incremental_marking()->Start();
   }
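IncrementalMarking::ShouldActivate() itself is not part of this file. Going by the helper this patch deletes further down (Heap::WorthActivatingIncrementalMarking, which combined WorthActivating() with NextGCIsLikelyToBeFull()), it presumably folds the remaining checks into the incremental marker. A hypothetical sketch of what that method might look like, not taken from the patch:

  bool IncrementalMarking::ShouldActivate() {
    // Assumed body: the IsStopped() check stays at the call site above,
    // the rest of the old heap-side helper moves here.
    return WorthActivating() && heap_->NextGCIsLikelyToBeFull();
  }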
@@ -1162,8 +1164,7 @@ bool Heap::PerformGarbageCollection(
     // Temporarily set the limit for the case when PostGarbageCollectionProcessing
     // allocates and triggers GC. The real limit is set after
     // PostGarbageCollectionProcessing.
-    old_generation_allocation_limit_ =
-        OldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(), 0);
+    SetOldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(), 0, false);
     old_gen_exhausted_ = false;
     old_generation_size_configured_ = true;
   } else {
@@ -1197,8 +1198,8 @@ bool Heap::PerformGarbageCollection(
     // Register the amount of external allocated memory.
     amount_of_external_allocated_memory_at_last_global_gc_ =
         amount_of_external_allocated_memory_;
-    old_generation_allocation_limit_ = OldGenerationAllocationLimit(
-        PromotedSpaceSizeOfObjects(), freed_global_handles);
+    SetOldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(),
+                                    freed_global_handles, true);
     // We finished a marking cycle. We can uncommit the marking deque until
     // we start marking again.
     mark_compact_collector_.UncommitMarkingDeque();
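The new third argument distinguishes the two call sites above: false for the provisional limit installed before PostGarbageCollectionProcessing runs, true once the weak callbacks have completed, which is also the only case in which the FLAG_trace_gc line added at the end of this patch prints.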
@@ -4589,12 +4590,6 @@ bool Heap::TryFinalizeIdleIncrementalMarking(
 }


-bool Heap::WorthActivatingIncrementalMarking() {
-  return incremental_marking()->IsStopped() &&
-         incremental_marking()->WorthActivating() && NextGCIsLikelyToBeFull();
-}
-
-
 static double MonotonicallyIncreasingTimeInMs() {
   return V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() *
          static_cast<double>(base::Time::kMillisecondsPerSecond);
@@ -5239,8 +5234,9 @@ int64_t Heap::PromotedExternalMemorySize() {
 }


-intptr_t Heap::OldGenerationAllocationLimit(intptr_t old_gen_size,
-                                            int freed_global_handles) {
+void Heap::SetOldGenerationAllocationLimit(intptr_t old_gen_size,
+                                           int freed_global_handles,
+                                           bool weak_callbacks_completed) {
   const int kMaxHandles = 1000;
   const int kMinHandles = 100;
   double min_factor = 1.1;
@@ -5274,9 +5270,30 @@ intptr_t Heap::OldGenerationAllocationLimit(intptr_t old_gen_size,
   intptr_t limit = static_cast<intptr_t>(old_gen_size * factor);
   limit = Max(limit, kMinimumOldGenerationAllocationLimit);
-  limit += new_space_.Capacity();
-  intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
-  return Min(limit, halfway_to_the_max);
+
+  old_generation_allocation_limit_ = limit + new_space_.Capacity();
  [ulan, 2015/04/07 13:27:19] Should we enforce that old_generation_allocation_limit_ ...
  [Erik Corry Chromium.org, 2015/04/09 12:26:15] I think since we consult both, it is clearer to le...
+
+  // The committed memory limit is halfway from the current live object count
+  // to the max size. This means that if half of our allowed extra memory is
+  // currently taken by fragmentation we will immediately start another
+  // incremental GC. If there is no fragmentation, we will start incremental
+  // GC when we have committed half the allowed extra memory. This limit will
+  // be compared against the committed memory, i.e. including fragmentation.
+  intptr_t commit_limit_basis = old_gen_size;
  [ulan, 2015/04/07 13:27:19] This can lead to never-ending GCs even if fragment...
  [Erik Corry Chromium.org, 2015/04/09 12:26:15] Done.
+  if (FLAG_never_compact || !FLAG_compact_code_space) {
+    // No point in provoking GCs to get rid of fragmentation if we can't
+    // actually get rid of fragmentation. In this case set the limit higher.
+    commit_limit_basis = CommittedOldGenerationMemory();
+  }
+  old_generation_committed_memory_limit_ =
+      commit_limit_basis / 2 + max_old_generation_size_ / 2;
+
+  if (weak_callbacks_completed && FLAG_trace_gc) {
+    PrintPID("%8.0f ms: ", isolate()->time_millis_since_init());
+    PrintF("Next GC at %.1f (%.1f) -> %.1f (%.1f)\n", old_gen_size * 1.0 / MB,
+           CommittedOldGenerationMemory() * 1.0 / MB, limit * 1.0 / MB,
+           old_generation_committed_memory_limit_ * 1.0 / MB);
+  }
 }
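To make the two limits concrete, here is a small standalone sketch (not V8 code) of the arithmetic above. The 1.5 growing factor, the 16 MB new-space capacity, the 100 MB live size and the 64-bit 1400 MB maximum are assumed example values, and the Max() clamp against kMinimumOldGenerationAllocationLimit is omitted:

  #include <cstdint>
  #include <cstdio>

  int main() {
    const int64_t MB = 1024 * 1024;
    const int64_t old_gen_size = 100 * MB;     // live objects after the GC
    const int64_t max_old_generation_size = 1400 * MB;
    const int64_t new_space_capacity = 16 * MB;
    const double factor = 1.5;                 // assumed growing factor

    // Allocation-based limit: grown live size plus new-space capacity.
    const int64_t allocation_limit =
        static_cast<int64_t>(old_gen_size * factor) + new_space_capacity;
    // Committed-memory limit: halfway from the live size to the maximum.
    const int64_t committed_limit =
        old_gen_size / 2 + max_old_generation_size / 2;

    std::printf("next GC at %lld MB allocated or %lld MB committed\n",
                static_cast<long long>(allocation_limit / MB),
                static_cast<long long>(committed_limit / MB));
  }

With FLAG_trace_gc on and weak callbacks completed, the PrintF above reports the same quantities in this order: live size, committed size, allocation limit, committed-memory limit (all in MB).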