| Index: src/heap/heap.cc |
| diff --git a/src/heap/heap.cc b/src/heap/heap.cc |
| index 27c24f34b15fd5a9bedd121cad06463456da8f6b..baa789c446f69b5ad61f09ebb33a8d583cf4b1d1 100644 |
| --- a/src/heap/heap.cc |
| +++ b/src/heap/heap.cc |
| @@ -17,6 +17,7 @@ |
| #include "src/debug.h" |
| #include "src/deoptimizer.h" |
| #include "src/global-handles.h" |
| +#include "src/heap/cleanup-gc.h" |
| #include "src/heap/gc-idle-time-handler.h" |
| #include "src/heap/incremental-marking.h" |
| #include "src/heap/mark-compact.h" |
| @@ -107,8 +108,6 @@ Heap::Heap() |
| allocation_timeout_(0), |
| #endif // DEBUG |
| old_generation_allocation_limit_(initial_old_generation_size_), |
| - idle_old_generation_allocation_limit_( |
| - kMinimumOldGenerationAllocationLimit), |
| old_gen_exhausted_(false), |
| inline_allocation_disabled_(false), |
| store_buffer_rebuilder_(store_buffer()), |
| @@ -143,6 +142,7 @@ Heap::Heap() |
| store_buffer_(this), |
| marking_(this), |
| incremental_marking_(this), |
| + cleanup_gc_(this), |
| full_codegen_bytes_generated_(0), |
| crankshaft_codegen_bytes_generated_(0), |
| new_space_allocation_counter_(0), |
| @@ -926,6 +926,11 @@ bool Heap::CollectGarbage(GarbageCollector collector, const char* gc_reason, |
| } |
| bool next_gc_likely_to_collect_more = false; |
| + intptr_t committed_memory_before; |
| + |
| + if (collector == MARK_COMPACTOR) { |
| + committed_memory_before = CommittedOldGenerationMemory(); |
| + } |
| { |
| tracer()->Start(collector, gc_reason, collector_reason); |
| @@ -947,9 +952,25 @@ bool Heap::CollectGarbage(GarbageCollector collector, const char* gc_reason, |
| } |
| if (collector == MARK_COMPACTOR) { |
| - gc_idle_time_handler_.NotifyMarkCompact(next_gc_likely_to_collect_more); |
| + intptr_t committed_memory_after = CommittedOldGenerationMemory(); |
| + intptr_t used_memory_after = PromotedSpaceSizeOfObjects(); |
| + CleanupGC::Event event; |
| + event.type = CleanupGC::kMarkCompact; |
| + event.time_ms = MonotonicallyIncreasingTimeInMs(); |
| + // Trigger one more GC if |
| + // - this GC decreased committed memory, |
| + // - there is high fragmentation, |
| + // - there are live detached contexts. |
| + event.next_gc_likely_to_collect_more = |
| + (committed_memory_before - committed_memory_after) > MB || |
| + HasHighFragmentation(used_memory_after, committed_memory_after) || |
| + (detached_contexts()->length() > 0); |
| + cleanup_gc_.NotifyMarkCompact(event); |
| } else { |
| - gc_idle_time_handler_.NotifyScavenge(); |
| + CleanupGC::Event event; |
| + event.type = CleanupGC::kScavenge; |
| + event.time_ms = MonotonicallyIncreasingTimeInMs(); |
| + cleanup_gc_.NotifyScavenge(event); |
| } |
| tracer()->Stop(collector); |
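The next_gc_likely_to_collect_more flag set above for mark-compact events combines three signals: the collection released more than 1 MB of committed old-generation memory, the old generation is highly fragmented, or detached contexts are still alive. A minimal standalone sketch of that predicate follows; the function name and parameters are illustrative, not V8 declarations, and the fragmentation test is taken as an input here, mirroring the HasHighFragmentation helper added further down in this CL:

#include <cstdint>
#include <cstdio>

namespace {

const int64_t kMB = 1024 * 1024;

// Illustrative stand-in for the decision made after a mark-compact GC in
// Heap::CollectGarbage; the name and signature are not from V8.
bool ShouldRequestFollowUpGC(int64_t committed_before, int64_t committed_after,
                             bool high_fragmentation, int detached_contexts) {
  return (committed_before - committed_after) > kMB ||  // this GC shrank committed memory
         high_fragmentation ||                          // much committed but unused memory
         detached_contexts > 0;                         // contexts waiting to be collected
}

}  // namespace

int main() {
  // Example: committed old-generation memory dropped from 96 MB to 90 MB,
  // no fragmentation, no detached contexts -> one more GC looks worthwhile.
  bool more = ShouldRequestFollowUpGC(96 * kMB, 90 * kMB,
                                      /*high_fragmentation=*/false,
                                      /*detached_contexts=*/0);
  printf("next GC likely to collect more: %s\n", more ? "yes" : "no");
  return 0;
}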
| @@ -984,6 +1005,10 @@ int Heap::NotifyContextDisposed(bool dependant_context) { |
| AgeInlineCaches(); |
| set_retained_maps(ArrayList::cast(empty_fixed_array())); |
| tracer()->AddContextDisposalTime(base::OS::TimeCurrentMillis()); |
| + CleanupGC::Event event; |
| + event.type = CleanupGC::kContextDisposed; |
| + event.time_ms = MonotonicallyIncreasingTimeInMs(); |
| + cleanup_gc_.NotifyContextDisposed(event); |
| return ++contexts_disposed_; |
| } |
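Taken together, the call sites above imply a small interface for the new cleanup-gc component. The sketch below is reconstructed purely from this file's usage, not from the actual src/heap/cleanup-gc.h added by this CL; the by-const-reference parameters and the stubbed method bodies are assumptions:

// Forward declaration; the real class lives in src/heap/heap.h.
class Heap;

// Interface inferred from the call sites in heap.cc only; the real
// src/heap/cleanup-gc.h introduced by this CL may differ.
class CleanupGC {
 public:
  enum EventType { kMarkCompact, kScavenge, kContextDisposed };

  struct Event {
    EventType type;
    double time_ms;
    // Only filled in for mark-compact events in Heap::CollectGarbage.
    bool next_gc_likely_to_collect_more;
  };

  explicit CleanupGC(Heap* heap) : heap_(heap) {}

  // Bodies are stand-ins; the real logic lives in cleanup-gc.cc, not shown here.
  void NotifyMarkCompact(const Event& event) { last_ = event; }
  void NotifyScavenge(const Event& event) { last_ = event; }
  void NotifyContextDisposed(const Event& event) { last_ = event; }

 private:
  Heap* heap_;
  Event last_;
};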
| @@ -4746,6 +4771,21 @@ bool Heap::HasLowAllocationRate() { |
| } |
| +bool Heap::HasHighFragmentation() { |
| + intptr_t used = PromotedSpaceSizeOfObjects(); |
| + intptr_t committed = CommittedOldGenerationMemory(); |
| + return HasHighFragmentation(used, committed); |
| +} |
| + |
| + |
| +bool Heap::HasHighFragmentation(intptr_t used, intptr_t committed) { |
| + const intptr_t kSlack = 16 * MB; |
| + // Fragmentation is high if committed > 2 * used + kSlack. |
| + // Rewrite the expression to avoid overflow. |
| + return committed - used > used + kSlack; |
| +} |
| + |
| + |
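The comment explains the rewritten comparison: on a 32-bit build, intptr_t is 32 bits wide, so the intermediate 2 * used + kSlack in the straightforward form committed > 2 * used + kSlack can exceed INT32_MAX for heaps above roughly 1 GB, while committed - used > used + kSlack keeps both sides in range whenever the inputs themselves fit. A small numeric illustration with made-up sizes, computed in 64-bit so the oversized intermediate can be printed instead of overflowing:

#include <cstdint>
#include <cstdio>

int main() {
  // Hypothetical numbers for a large heap on a 32-bit build:
  // ~1536 MB of live objects in ~1600 MB of committed old-generation memory.
  const int64_t kMB = 1024 * 1024;
  const int64_t kSlack = 16 * kMB;
  int64_t used = 1536 * kMB;
  int64_t committed = 1600 * kMB;

  // Naive form: committed > 2 * used + kSlack. The right-hand side is
  // ~3088 MB, which does not fit in a 32-bit intptr_t (INT32_MAX is roughly
  // 2048 MB); it would wrap negative and wrongly report high fragmentation.
  int64_t naive_rhs = 2 * used + kSlack;
  printf("2 * used + kSlack = %lld MB\n", (long long)(naive_rhs / kMB));

  // Rewritten form from the patch: committed - used > used + kSlack.
  // Both sides stay in range as long as used + kSlack itself fits,
  // which holds for any realistic 32-bit heap.
  bool high_fragmentation = committed - used > used + kSlack;
  printf("high fragmentation: %s\n", high_fragmentation ? "yes" : "no");
  return 0;
}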
| void Heap::ReduceNewSpaceSize() { |
| if (!FLAG_predictable && HasLowAllocationRate()) { |
| new_space_.Shrink(); |
| @@ -4772,7 +4812,6 @@ bool Heap::TryFinalizeIdleIncrementalMarking( |
| static_cast<size_t>(idle_time_in_ms), size_of_objects, |
| final_incremental_mark_compact_speed_in_bytes_per_ms))) { |
| CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental"); |
| - gc_idle_time_handler_.NotifyIdleMarkCompact(); |
| return true; |
| } |
| return false; |
| @@ -4803,15 +4842,6 @@ GCIdleTimeHandler::HeapState Heap::ComputeHeapState() { |
| heap_state.new_space_capacity = new_space_.Capacity(); |
| heap_state.new_space_allocation_throughput_in_bytes_per_ms = |
| tracer()->NewSpaceAllocationThroughputInBytesPerMillisecond(); |
| - heap_state.has_low_allocation_rate = HasLowAllocationRate(); |
| - intptr_t limit = old_generation_allocation_limit_; |
| - if (heap_state.has_low_allocation_rate) { |
| - limit = idle_old_generation_allocation_limit_; |
| - } |
| - heap_state.can_start_incremental_marking = |
| - incremental_marking()->CanBeActivated() && |
| - HeapIsFullEnoughToStartIncrementalMarking(limit) && |
| - !mark_compact_collector()->sweeping_in_progress(); |
| return heap_state; |
| } |
| @@ -4825,10 +4855,7 @@ bool Heap::PerformIdleTimeAction(GCIdleTimeAction action, |
| result = true; |
| break; |
| case DO_INCREMENTAL_MARKING: { |
| - if (incremental_marking()->IsStopped()) { |
| - incremental_marking()->Start( |
| - action.reduce_memory ? kReduceMemoryFootprintMask : kNoGCFlags); |
| - } |
| + DCHECK(!incremental_marking()->IsStopped()); |
| double remaining_idle_time_in_ms = 0.0; |
| do { |
| incremental_marking()->Step( |
| @@ -4849,17 +4876,13 @@ bool Heap::PerformIdleTimeAction(GCIdleTimeAction action, |
| break; |
| } |
| case DO_FULL_GC: { |
| - if (action.reduce_memory) { |
| - isolate_->compilation_cache()->Clear(); |
| - } |
| if (contexts_disposed_) { |
| HistogramTimerScope scope(isolate_->counters()->gc_context()); |
| CollectAllGarbage(kNoGCFlags, "idle notification: contexts disposed"); |
| } else { |
| CollectAllGarbage(kReduceMemoryFootprintMask, |
| - "idle notification: finalize idle round"); |
| + "idle notification: non-incremental mark-compact"); |

Hannes Payer (out of office), 2015/07/01 12:03:17:
Could this call end an ongoing incremental marking

ulan, 2015/07/01 12:51:48:
I removed this completely and added DCHECK(context
| } |
| - gc_idle_time_handler_.NotifyIdleMarkCompact(); |
| break; |
| } |
| case DO_SCAVENGE: |
| @@ -5594,23 +5617,14 @@ void Heap::SetOldGenerationAllocationLimit(intptr_t old_gen_size, |
| factor = kMinHeapGrowingFactor; |
| } |
| - // TODO(hpayer): Investigate if idle_old_generation_allocation_limit_ is still |
| - // needed after taking the allocation rate for the old generation limit into |
| - // account. |
| - double idle_factor = Min(factor, kMaxHeapGrowingFactorIdle); |
| - |
| old_generation_allocation_limit_ = |
| CalculateOldGenerationAllocationLimit(factor, old_gen_size); |
| - idle_old_generation_allocation_limit_ = |
| - CalculateOldGenerationAllocationLimit(idle_factor, old_gen_size); |
| if (FLAG_trace_gc_verbose) { |
| - PrintIsolate( |
| - isolate_, |
| - "Grow: old size: %" V8_PTR_PREFIX "d KB, new limit: %" V8_PTR_PREFIX |
| - "d KB (%.1f), new idle limit: %" V8_PTR_PREFIX "d KB (%.1f)\n", |
| - old_gen_size / KB, old_generation_allocation_limit_ / KB, factor, |
| - idle_old_generation_allocation_limit_ / KB, idle_factor); |
| + PrintIsolate(isolate_, "Grow: old size: %" V8_PTR_PREFIX |
| + "d KB, new limit: %" V8_PTR_PREFIX "d KB (%.1f)\n", |
| + old_gen_size / KB, old_generation_allocation_limit_ / KB, |
| + factor); |
| } |
| } |
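For reference, the deleted idle limit was just a second call to the same limit calculation with a capped growing factor. The sketch below reconstructs that from the removed lines only; the value of kMaxHeapGrowingFactorIdle and the body of CalculateOldGenerationAllocationLimit are not part of this diff, so both are stand-ins for illustration:

#include <algorithm>
#include <cstdint>
#include <cstdio>

namespace {

// Assumed example value; the real constant is defined elsewhere in V8.
const double kMaxHeapGrowingFactorIdle = 1.5;

// Placeholder for V8's CalculateOldGenerationAllocationLimit(); the real
// implementation is not shown in this CL, so a plain multiply stands in.
int64_t CalculateLimit(double factor, int64_t old_gen_size) {
  return static_cast<int64_t>(old_gen_size * factor);
}

}  // namespace

int main() {
  const int64_t kMB = 1024 * 1024;
  int64_t old_gen_size = 128 * kMB;
  double factor = 2.2;  // example growing factor

  // After this patch: a single old-generation allocation limit.
  int64_t limit = CalculateLimit(factor, old_gen_size);

  // Before this patch: an additional, more conservative idle-time limit.
  double idle_factor = std::min(factor, kMaxHeapGrowingFactorIdle);
  int64_t idle_limit = CalculateLimit(idle_factor, old_gen_size);

  printf("limit: %lld MB, old idle limit: %lld MB\n",
         (long long)(limit / kMB), (long long)(idle_limit / kMB));
  return 0;
}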