Chromium Code Reviews

| Index: src/heap/cleanup-gc.cc |
| diff --git a/src/heap/cleanup-gc.cc b/src/heap/cleanup-gc.cc |
| new file mode 100644 |
| index 0000000000000000000000000000000000000000..61ce463592151d8b0cd710e6cd75ada25d265f0d |
| --- /dev/null |
| +++ b/src/heap/cleanup-gc.cc |
| @@ -0,0 +1,141 @@ |
| +// Copyright 2014 the V8 project authors. All rights reserved. |
| +// Use of this source code is governed by a BSD-style license that can be |
| +// found in the LICENSE file. |
| + |
| +#include "src/flags.h" |
| +#include "src/heap/cleanup-gc.h" |
| +#include "src/heap/heap.h" |
| +#include "src/utils.h" |
| +#include "src/v8.h" |
| + |
| +namespace v8 { |
| +namespace internal { |
| + |
| +const int CleanupGC::kLongDelayMs = 5000; |
| +const int CleanupGC::kShortDelayMs = 500; |
| +const int CleanupGC::kMaxNumberOfGCs = 3; |
| + |
| + |
| +void CleanupGC::TimerTask::Run() { |
| + Heap* heap = cleanup_gc_->heap(); |
| + Event event; |
| + event.type = kTimer; |
| + event.time_ms = heap->MonotonicallyIncreasingTimeInMs(); |
| + event.high_fragmentation = heap->HasHighFragmentation(); |
| + event.low_allocation_rate = heap->HasLowAllocationRate(); |
| + event.incremental_gc_in_progress = !heap->incremental_marking()->IsStopped(); |
| + cleanup_gc_->NotifyTimer(event); |
| +} |
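
The Event and State types used throughout this file are declared in src/heap/cleanup-gc.h, which is not part of this diff. Reconstructed from the fields read and written in this file, they presumably look roughly like the sketch below; the exact names, member order, and types are assumptions.

    // Hypothetical reconstruction from usage in this file; the real
    // declarations live in src/heap/cleanup-gc.h, which this patch does
    // not show.
    enum EventType { kTimer, kMarkCompact, kScavenge, kContextDisposed };

    struct Event {
      EventType type;
      double time_ms;
      bool low_allocation_rate;
      bool high_fragmentation;
      bool incremental_gc_in_progress;
      bool next_gc_likely_to_collect_more;
    };

    enum Action { kDone, kWait, kRun };

    struct State {
      State(Action action, int started_gcs, double next_gc_start_ms)
          : action(action),
            started_gcs(started_gcs),
            next_gc_start_ms(next_gc_start_ms) {}
      Action action;
      int started_gcs;
      double next_gc_start_ms;
    };
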
| + |
| + |
| +void CleanupGC::NotifyTimer(const Event& event) { |
| + DCHECK_EQ(kTimer, event.type); |
| + DCHECK_EQ(kWait, state_.action); |
| + state_ = Step(state_, event); |
| + if (state_.action == kRun) { |
| + DCHECK(heap()->incremental_marking()->IsStopped()); |
| + DCHECK(FLAG_incremental_marking); |
| + heap()->incremental_marking()->Start(Heap::kReduceMemoryFootprintMask); |
| + if (FLAG_trace_gc_verbose) { |
| + PrintIsolate(heap()->isolate(), "Clean-up GC: started #%d\n", |
| + state_.started_gcs); |
| + } |
| + } else if (state_.action == kWait) { |
| + // Re-schedule the timer. |
| + ScheduleTimer(state_.next_gc_start_ms - event.time_ms); |
| + if (FLAG_trace_gc_verbose) { |
| +    PrintIsolate(heap()->isolate(), "Clean-up GC: wait for %.f ms\n", |
| + state_.next_gc_start_ms - event.time_ms); |
| + } |
| + } |
| +} |
| + |
| + |
| +void CleanupGC::NotifyMarkCompact(const Event& event) { |
| + DCHECK_EQ(kMarkCompact, event.type); |
| + Action old_action = state_.action; |
| + state_ = Step(state_, event); |
| + if (old_action != kWait && state_.action == kWait) { |
| + // If we are transitioning to the WAIT state, start the timer. |
| + ScheduleTimer(state_.next_gc_start_ms - event.time_ms); |
| + } |
| + if (old_action == kRun) { |
| + if (FLAG_trace_gc_verbose) { |
| + PrintIsolate(heap()->isolate(), "Clean-up GC: finished #%d (%s)\n", |
| + state_.started_gcs, |
| + state_.action == kWait ? "will do more" : "done"); |
| + } |
| + } |
| +} |
| + |
| + |
| +void CleanupGC::NotifyScavenge(const Event& event) { |
| + DCHECK_EQ(kScavenge, event.type); |
| + state_ = Step(state_, event); |
| +} |
| + |
| + |
| +void CleanupGC::NotifyContextDisposed(const Event& event) { |
| + DCHECK_EQ(kContextDisposed, event.type); |
| + Action old_action = state_.action; |
| + state_ = Step(state_, event); |
| + if (old_action != kWait && state_.action == kWait) { |
| + // If we are transitioning to the WAIT state, start the timer. |
| + ScheduleTimer(state_.next_gc_start_ms - event.time_ms); |
| + } |
| +} |
| + |
| + |
| +// For the specification of this function, see the comment for the CleanupGC class. |
| +CleanupGC::State CleanupGC::Step(const State& state, const Event& event) { |
| + if (!FLAG_incremental_marking) { |
| + return State(kDone, 0, 0); |
| + } |
| + switch (state.action) { |
| + case kDone: |
| + if (event.type == kScavenge || event.type == kTimer) { |
| + return state; |
| + } else { |
| + DCHECK(event.type == kContextDisposed || event.type == kMarkCompact); |
| + return State(kWait, 0, event.time_ms + kLongDelayMs); |
| + } |
| + case kWait: |
| + if (event.type == kContextDisposed) { |
| + return state; |
| + } else if (event.type == kTimer && !event.incremental_gc_in_progress && |
| + (event.low_allocation_rate || event.high_fragmentation)) { |
| + if (state.next_gc_start_ms <= event.time_ms) { |
| + return State(kRun, state.started_gcs + 1, 0.0); |
| + } else { |
| + return state; |
| + } |
| + } else { |
| + return State(kWait, state.started_gcs, event.time_ms + kLongDelayMs); |
| + } |
| + case kRun: |
| + if (event.type != kMarkCompact) { |
| + return state; |
| + } else { |
| + if (state.started_gcs < kMaxNumberOfGCs && |
| + (event.next_gc_likely_to_collect_more || state.started_gcs == 1)) { |
| + return State(kWait, state.started_gcs, event.time_ms + kShortDelayMs); |
| + } else { |
| + return State(kDone, 0, 0.0); |
| + } |
| + } |
| + } |
| + UNREACHABLE(); |
| + return State(kDone, 0, 0); // Make the compiler happy. |
| +} |
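
To make the state machine above concrete, here is a unit-test-style trace of one typical event sequence. It is only a sketch: it assumes that Step, State, Event, and the enum constants are publicly accessible through src/heap/cleanup-gc.h (not shown in this patch) and that FLAG_incremental_marking is on; the MarkCompactEvent and IdleTimerEvent helpers are hypothetical.

    // Hypothetical trace; the helper functions and the accessibility of the
    // members are assumptions, since cleanup-gc.h is not part of this patch.
    #include "src/heap/cleanup-gc.h"

    namespace v8 {
    namespace internal {

    static CleanupGC::Event MarkCompactEvent(double time_ms, bool more) {
      CleanupGC::Event event;
      event.type = CleanupGC::kMarkCompact;
      event.time_ms = time_ms;
      event.next_gc_likely_to_collect_more = more;
      return event;
    }

    static CleanupGC::Event IdleTimerEvent(double time_ms) {
      CleanupGC::Event event;
      event.type = CleanupGC::kTimer;
      event.time_ms = time_ms;
      event.incremental_gc_in_progress = false;
      event.low_allocation_rate = true;
      event.high_fragmentation = false;
      return event;
    }

    void ExampleTrace(CleanupGC* cleanup_gc) {
      // DONE + mark-compact: start waiting; next GC no earlier than
      // 0 + kLongDelayMs.
      CleanupGC::State state(CleanupGC::kDone, 0, 0);
      state = cleanup_gc->Step(state, MarkCompactEvent(0, true));
      // state.action == kWait, state.next_gc_start_ms == 5000.

      // WAIT + timer firing after the delay with a low allocation rate:
      // start clean-up GC #1.
      state = cleanup_gc->Step(state, IdleTimerEvent(6000));
      // state.action == kRun, state.started_gcs == 1.

      // RUN + mark-compact that is likely to collect more: wait kShortDelayMs.
      state = cleanup_gc->Step(state, MarkCompactEvent(7000, true));
      // state.action == kWait, state.next_gc_start_ms == 7500.
    }

    }  // namespace internal
    }  // namespace v8
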
| + |
| + |
| +void CleanupGC::ScheduleTimer(double delay_ms) { |

Hannes Payer (out of office)
2015/07/01 12:03:16
Can we DCHECK that delay_ms is always positive?

ulan
2015/07/01 12:51:48
Done.

| + // Leave some room for precision error in task scheduler. |
| + const double kSlackMs = 100; |
| + v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap()->isolate()); |
| + V8::GetCurrentPlatform()->CallDelayedOnForegroundThread( |
| +     isolate, new CleanupGC::TimerTask(this), (delay_ms + kSlackMs) / 1000.0); |

Hannes Payer (out of office)
2015/07/01 12:03:16
Where is the delete?

ulan
2015/07/01 12:51:48
The platform will delete it.

| +} |
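
On the two review threads above: "Done." presumably means a check on delay_ms was added in a later patch set (not shown here), and no explicit delete of the TimerTask is needed because the platform takes ownership of tasks passed to CallDelayedOnForegroundThread and destroys them after they run. A minimal sketch of how the requested check could look, assuming the revised patch kept the function otherwise unchanged:

    // Sketch only; the revised patch set is not shown, so the exact form of
    // the check is an assumption.
    void CleanupGC::ScheduleTimer(double delay_ms) {
      DCHECK(delay_ms > 0);  // Every caller passes next_gc_start_ms - time_ms,
                             // which is positive by construction in Step().
      // ... rest of the body as in the patch above; the platform deletes the
      // TimerTask after running it, so there is no explicit delete here.
    }
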
| + |
| +}  // namespace internal |
| +}  // namespace v8 |
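
For context, the call sites that feed events into CleanupGC live elsewhere in the heap code and are not part of this patch. A hedged sketch of what the mark-compact notification might look like on the Heap side; the free function, its name, and its placement are assumptions, and only the Event fields and the NotifyMarkCompact call mirror this file:

    // Hypothetical call-site sketch; the real wiring is in the heap code and
    // is not part of this patch.
    void NotifyCleanupGCOfMarkCompact(Heap* heap, CleanupGC* cleanup_gc,
                                      bool next_gc_likely_to_collect_more) {
      CleanupGC::Event event;
      event.type = CleanupGC::kMarkCompact;
      event.time_ms = heap->MonotonicallyIncreasingTimeInMs();
      event.next_gc_likely_to_collect_more = next_gc_likely_to_collect_more;
      cleanup_gc->NotifyMarkCompact(event);  // May schedule the clean-up timer.
    }
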