Index: src/heap/heap.cc |
diff --git a/src/heap/heap.cc b/src/heap/heap.cc |
index 51d4dfe7fc3b8a0c08bd1d1b0f8680fb85edf350..6af1a05d09ef8ba3fbb3f4b3b781c5eedbbfa263 100644 |
--- a/src/heap/heap.cc |
+++ b/src/heap/heap.cc |
@@ -20,6 +20,7 @@ |
#include "src/heap/gc-idle-time-handler.h" |
#include "src/heap/incremental-marking.h" |
#include "src/heap/mark-compact.h" |
+#include "src/heap/memory-reducer.h" |
#include "src/heap/objects-visiting-inl.h" |
#include "src/heap/objects-visiting.h" |
#include "src/heap/store-buffer.h" |
@@ -107,8 +108,6 @@ Heap::Heap() |
allocation_timeout_(0), |
#endif // DEBUG |
old_generation_allocation_limit_(initial_old_generation_size_), |
- idle_old_generation_allocation_limit_( |
- kMinimumOldGenerationAllocationLimit), |
old_gen_exhausted_(false), |
inline_allocation_disabled_(false), |
store_buffer_rebuilder_(store_buffer()), |
@@ -144,6 +143,7 @@ Heap::Heap() |
store_buffer_(this), |
marking_(this), |
incremental_marking_(this), |
+ memory_reducer_(this), |
full_codegen_bytes_generated_(0), |
crankshaft_codegen_bytes_generated_(0), |
new_space_allocation_counter_(0), |
@@ -927,6 +927,11 @@ bool Heap::CollectGarbage(GarbageCollector collector, const char* gc_reason, |
} |
bool next_gc_likely_to_collect_more = false; |
+ intptr_t committed_memory_before = 0; |
+ |
+ if (collector == MARK_COMPACTOR) { |
+ committed_memory_before = CommittedOldGenerationMemory(); |
+ } |
{ |
tracer()->Start(collector, gc_reason, collector_reason); |
@@ -948,9 +953,20 @@ bool Heap::CollectGarbage(GarbageCollector collector, const char* gc_reason, |
} |
if (collector == MARK_COMPACTOR) { |
- gc_idle_time_handler_.NotifyMarkCompact(next_gc_likely_to_collect_more); |
- } else { |
- gc_idle_time_handler_.NotifyScavenge(); |
+ intptr_t committed_memory_after = CommittedOldGenerationMemory(); |
+ intptr_t used_memory_after = PromotedSpaceSizeOfObjects(); |
+ MemoryReducer::Event event; |
+ event.type = MemoryReducer::kMarkCompact; |
+ event.time_ms = MonotonicallyIncreasingTimeInMs(); |
+ // Trigger one more GC if |
+ // - this GC decreased committed memory, |
+ // - there is high fragmentation, |
+ // - there are live detached contexts. |
+ event.next_gc_likely_to_collect_more = |
+ (committed_memory_before - committed_memory_after) > MB || |
+ HasHighFragmentation(used_memory_after, committed_memory_after) || |
+ (detached_contexts()->length() > 0); |
+ memory_reducer_.NotifyMarkCompact(event); |
} |
tracer()->Stop(collector); |
@@ -985,10 +1001,20 @@ int Heap::NotifyContextDisposed(bool dependant_context) { |
AgeInlineCaches(); |
set_retained_maps(ArrayList::cast(empty_fixed_array())); |
tracer()->AddContextDisposalTime(base::OS::TimeCurrentMillis()); |
+ MemoryReducer::Event event; |
+ event.type = MemoryReducer::kContextDisposed; |
+ event.time_ms = MonotonicallyIncreasingTimeInMs(); |
+ memory_reducer_.NotifyContextDisposed(event); |
return ++contexts_disposed_; |
} |
+void Heap::StartIdleIncrementalMarking() { |
+ gc_idle_time_handler_.ResetNoProgressCounter(); |
+ incremental_marking()->Start(kReduceMemoryFootprintMask); |
+} |
+ |
+ |
void Heap::MoveElements(FixedArray* array, int dst_index, int src_index, |
int len) { |
if (len == 0) return; |
@@ -4763,6 +4789,21 @@ bool Heap::HasLowAllocationRate() { |
} |
+bool Heap::HasHighFragmentation() { |
+ intptr_t used = PromotedSpaceSizeOfObjects(); |
+ intptr_t committed = CommittedOldGenerationMemory(); |
+ return HasHighFragmentation(used, committed); |
+} |
+ |
+ |
+bool Heap::HasHighFragmentation(intptr_t used, intptr_t committed) { |
+ const intptr_t kSlack = 16 * MB; |
+ // Fragmentation is high if committed > 2 * used + kSlack. |
+  // Rewrite the expression to avoid overflow. |
+ return committed - used > used + kSlack; |
+} |
+ |
+ |
void Heap::ReduceNewSpaceSize() { |
if (!FLAG_predictable && HasLowAllocationRate()) { |
new_space_.Shrink(); |
@@ -4789,7 +4830,6 @@ bool Heap::TryFinalizeIdleIncrementalMarking( |
static_cast<size_t>(idle_time_in_ms), size_of_objects, |
final_incremental_mark_compact_speed_in_bytes_per_ms))) { |
CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental"); |
- gc_idle_time_handler_.NotifyIdleMarkCompact(); |
return true; |
} |
return false; |
@@ -4820,15 +4860,6 @@ GCIdleTimeHandler::HeapState Heap::ComputeHeapState() { |
heap_state.new_space_capacity = new_space_.Capacity(); |
heap_state.new_space_allocation_throughput_in_bytes_per_ms = |
tracer()->NewSpaceAllocationThroughputInBytesPerMillisecond(); |
- heap_state.has_low_allocation_rate = HasLowAllocationRate(); |
- intptr_t limit = old_generation_allocation_limit_; |
- if (heap_state.has_low_allocation_rate) { |
- limit = idle_old_generation_allocation_limit_; |
- } |
- heap_state.can_start_incremental_marking = |
- incremental_marking()->CanBeActivated() && |
- HeapIsFullEnoughToStartIncrementalMarking(limit) && |
- !mark_compact_collector()->sweeping_in_progress(); |
return heap_state; |
} |
@@ -4842,10 +4873,7 @@ bool Heap::PerformIdleTimeAction(GCIdleTimeAction action, |
result = true; |
break; |
case DO_INCREMENTAL_MARKING: { |
- if (incremental_marking()->IsStopped()) { |
- incremental_marking()->Start( |
- action.reduce_memory ? kReduceMemoryFootprintMask : kNoGCFlags); |
- } |
+ DCHECK(!incremental_marking()->IsStopped()); |
double remaining_idle_time_in_ms = 0.0; |
do { |
incremental_marking()->Step( |
@@ -4866,17 +4894,9 @@ bool Heap::PerformIdleTimeAction(GCIdleTimeAction action, |
break; |
} |
case DO_FULL_GC: { |
- if (action.reduce_memory) { |
- isolate_->compilation_cache()->Clear(); |
- } |
- if (contexts_disposed_) { |
- HistogramTimerScope scope(isolate_->counters()->gc_context()); |
- CollectAllGarbage(kNoGCFlags, "idle notification: contexts disposed"); |
- } else { |
- CollectAllGarbage(kReduceMemoryFootprintMask, |
- "idle notification: finalize idle round"); |
- } |
- gc_idle_time_handler_.NotifyIdleMarkCompact(); |
+ DCHECK(contexts_disposed_ > 0); |
+ HistogramTimerScope scope(isolate_->counters()->gc_context()); |
+ CollectAllGarbage(kNoGCFlags, "idle notification: contexts disposed"); |
break; |
} |
case DO_SCAVENGE: |
@@ -5612,23 +5632,14 @@ void Heap::SetOldGenerationAllocationLimit(intptr_t old_gen_size, |
factor = kMinHeapGrowingFactor; |
} |
- // TODO(hpayer): Investigate if idle_old_generation_allocation_limit_ is still |
- // needed after taking the allocation rate for the old generation limit into |
- // account. |
- double idle_factor = Min(factor, kMaxHeapGrowingFactorIdle); |
- |
old_generation_allocation_limit_ = |
CalculateOldGenerationAllocationLimit(factor, old_gen_size); |
- idle_old_generation_allocation_limit_ = |
- CalculateOldGenerationAllocationLimit(idle_factor, old_gen_size); |
if (FLAG_trace_gc_verbose) { |
- PrintIsolate( |
- isolate_, |
- "Grow: old size: %" V8_PTR_PREFIX "d KB, new limit: %" V8_PTR_PREFIX |
- "d KB (%.1f), new idle limit: %" V8_PTR_PREFIX "d KB (%.1f)\n", |
- old_gen_size / KB, old_generation_allocation_limit_ / KB, factor, |
- idle_old_generation_allocation_limit_ / KB, idle_factor); |
+ PrintIsolate(isolate_, "Grow: old size: %" V8_PTR_PREFIX |
+ "d KB, new limit: %" V8_PTR_PREFIX "d KB (%.1f)\n", |
+ old_gen_size / KB, old_generation_allocation_limit_ / KB, |
+ factor); |
} |
} |