| OLD | NEW |
| 1 // Copyright 2015 the V8 project authors. All rights reserved. | 1 // Copyright 2015 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/heap/memory-reducer.h" | 5 #include "src/heap/memory-reducer.h" |
| 6 | 6 |
| 7 #include "src/flags.h" | 7 #include "src/flags.h" |
| 8 #include "src/heap/gc-tracer.h" | 8 #include "src/heap/gc-tracer.h" |
| 9 #include "src/heap/heap-inl.h" | 9 #include "src/heap/heap-inl.h" |
| 10 #include "src/utils.h" | 10 #include "src/utils.h" |
| (...skipping 17 matching lines...) | (...skipping 17 matching lines...) |
| 28 Heap* heap = memory_reducer_->heap(); | 28 Heap* heap = memory_reducer_->heap(); |
| 29 Event event; | 29 Event event; |
| 30 double time_ms = heap->MonotonicallyIncreasingTimeInMs(); | 30 double time_ms = heap->MonotonicallyIncreasingTimeInMs(); |
| 31 heap->tracer()->SampleAllocation(time_ms, heap->NewSpaceAllocationCounter(), | 31 heap->tracer()->SampleAllocation(time_ms, heap->NewSpaceAllocationCounter(), |
| 32 heap->OldGenerationAllocationCounter()); | 32 heap->OldGenerationAllocationCounter()); |
| 33 double js_call_rate = memory_reducer_->SampleAndGetJsCallsPerMs(time_ms); | 33 double js_call_rate = memory_reducer_->SampleAndGetJsCallsPerMs(time_ms); |
| 34 bool low_allocation_rate = heap->HasLowAllocationRate(); | 34 bool low_allocation_rate = heap->HasLowAllocationRate(); |
| 35 bool is_idle = js_call_rate < kJsCallsPerMsThreshold && low_allocation_rate; | 35 bool is_idle = js_call_rate < kJsCallsPerMsThreshold && low_allocation_rate; |
| 36 bool optimize_for_memory = heap->ShouldOptimizeForMemoryUsage(); | 36 bool optimize_for_memory = heap->ShouldOptimizeForMemoryUsage(); |
| 37 if (FLAG_trace_gc_verbose) { | 37 if (FLAG_trace_gc_verbose) { |
| 38 PrintIsolate(heap->isolate(), "Memory reducer: call rate %.3lf, %s, %s\n", | 38 heap->isolate()->PrintWithTimestamp( |
| 39 js_call_rate, low_allocation_rate ? "low alloc" : "high alloc", | 39 "Memory reducer: call rate %.3lf, %s, %s\n", js_call_rate, |
| 40 optimize_for_memory ? "background" : "foreground"); | 40 low_allocation_rate ? "low alloc" : "high alloc", |
| 41 optimize_for_memory ? "background" : "foreground"); |
| 41 } | 42 } |
| 42 event.type = kTimer; | 43 event.type = kTimer; |
| 43 event.time_ms = time_ms; | 44 event.time_ms = time_ms; |
| 44 // The memory reducer will start incremental marking if | 45 // The memory reducer will start incremental marking if |
| 45 // 1) mutator is likely idle: js call rate is low and allocation rate is low. | 46 // 1) mutator is likely idle: js call rate is low and allocation rate is low. |
| 46 // 2) mutator is in background: optimize for memory flag is set. | 47 // 2) mutator is in background: optimize for memory flag is set. |
| 47 event.should_start_incremental_gc = is_idle || optimize_for_memory; | 48 event.should_start_incremental_gc = is_idle || optimize_for_memory; |
| 48 event.can_start_incremental_gc = | 49 event.can_start_incremental_gc = |
| 49 heap->incremental_marking()->IsStopped() && | 50 heap->incremental_marking()->IsStopped() && |
| 50 (heap->incremental_marking()->CanBeActivated() || optimize_for_memory); | 51 (heap->incremental_marking()->CanBeActivated() || optimize_for_memory); |
| (...skipping 12 matching lines...) | (...skipping 12 matching lines...) |
| 63 | 64 |
| 64 | 65 |
| 65 void MemoryReducer::NotifyTimer(const Event& event) { | 66 void MemoryReducer::NotifyTimer(const Event& event) { |
| 66 DCHECK_EQ(kTimer, event.type); | 67 DCHECK_EQ(kTimer, event.type); |
| 67 DCHECK_EQ(kWait, state_.action); | 68 DCHECK_EQ(kWait, state_.action); |
| 68 state_ = Step(state_, event); | 69 state_ = Step(state_, event); |
| 69 if (state_.action == kRun) { | 70 if (state_.action == kRun) { |
| 70 DCHECK(heap()->incremental_marking()->IsStopped()); | 71 DCHECK(heap()->incremental_marking()->IsStopped()); |
| 71 DCHECK(FLAG_incremental_marking); | 72 DCHECK(FLAG_incremental_marking); |
| 72 if (FLAG_trace_gc_verbose) { | 73 if (FLAG_trace_gc_verbose) { |
| 73 PrintIsolate(heap()->isolate(), "Memory reducer: started GC #%d\n", | 74 heap()->isolate()->PrintWithTimestamp("Memory reducer: started GC #%d\n", |
| 74 state_.started_gcs); | 75 state_.started_gcs); |
| 75 } | 76 } |
| 76 heap()->StartIdleIncrementalMarking( | 77 heap()->StartIdleIncrementalMarking( |
| 77 GarbageCollectionReason::kMemoryReducer); | 78 GarbageCollectionReason::kMemoryReducer); |
| 78 } else if (state_.action == kWait) { | 79 } else if (state_.action == kWait) { |
| 79 if (!heap()->incremental_marking()->IsStopped() && | 80 if (!heap()->incremental_marking()->IsStopped() && |
| 80 heap()->ShouldOptimizeForMemoryUsage()) { | 81 heap()->ShouldOptimizeForMemoryUsage()) { |
| 81 // Make progress with pending incremental marking if memory usage has | 82 // Make progress with pending incremental marking if memory usage has |
| 82 // higher priority than latency. This is important for background tabs | 83 // higher priority than latency. This is important for background tabs |
| 83 // that do not send idle notifications. | 84 // that do not send idle notifications. |
| 84 const int kIncrementalMarkingDelayMs = 500; | 85 const int kIncrementalMarkingDelayMs = 500; |
| 85 double deadline = heap()->MonotonicallyIncreasingTimeInMs() + | 86 double deadline = heap()->MonotonicallyIncreasingTimeInMs() + |
| 86 kIncrementalMarkingDelayMs; | 87 kIncrementalMarkingDelayMs; |
| 87 heap()->incremental_marking()->AdvanceIncrementalMarking( | 88 heap()->incremental_marking()->AdvanceIncrementalMarking( |
| 88 deadline, IncrementalMarking::NO_GC_VIA_STACK_GUARD, | 89 deadline, IncrementalMarking::NO_GC_VIA_STACK_GUARD, |
| 89 IncrementalMarking::FORCE_COMPLETION, StepOrigin::kTask); | 90 IncrementalMarking::FORCE_COMPLETION, StepOrigin::kTask); |
| 90 heap()->FinalizeIncrementalMarkingIfComplete( | 91 heap()->FinalizeIncrementalMarkingIfComplete( |
| 91 GarbageCollectionReason::kFinalizeMarkingViaTask); | 92 GarbageCollectionReason::kFinalizeMarkingViaTask); |
| 92 } | 93 } |
| 93 // Re-schedule the timer. | 94 // Re-schedule the timer. |
| 94 ScheduleTimer(event.time_ms, state_.next_gc_start_ms - event.time_ms); | 95 ScheduleTimer(event.time_ms, state_.next_gc_start_ms - event.time_ms); |
| 95 if (FLAG_trace_gc_verbose) { | 96 if (FLAG_trace_gc_verbose) { |
| 96 PrintIsolate(heap()->isolate(), "Memory reducer: waiting for %.f ms\n", | 97 heap()->isolate()->PrintWithTimestamp( |
| 97 state_.next_gc_start_ms - event.time_ms); | 98 "Memory reducer: waiting for %.f ms\n", |
| 99 state_.next_gc_start_ms - event.time_ms); |
| 98 } | 100 } |
| 99 } | 101 } |
| 100 } | 102 } |
| 101 | 103 |
| 102 | 104 |
| 103 void MemoryReducer::NotifyMarkCompact(const Event& event) { | 105 void MemoryReducer::NotifyMarkCompact(const Event& event) { |
| 104 DCHECK_EQ(kMarkCompact, event.type); | 106 DCHECK_EQ(kMarkCompact, event.type); |
| 105 Action old_action = state_.action; | 107 Action old_action = state_.action; |
| 106 state_ = Step(state_, event); | 108 state_ = Step(state_, event); |
| 107 if (old_action != kWait && state_.action == kWait) { | 109 if (old_action != kWait && state_.action == kWait) { |
| 108 // If we are transitioning to the WAIT state, start the timer. | 110 // If we are transitioning to the WAIT state, start the timer. |
| 109 ScheduleTimer(event.time_ms, state_.next_gc_start_ms - event.time_ms); | 111 ScheduleTimer(event.time_ms, state_.next_gc_start_ms - event.time_ms); |
| 110 } | 112 } |
| 111 if (old_action == kRun) { | 113 if (old_action == kRun) { |
| 112 if (FLAG_trace_gc_verbose) { | 114 if (FLAG_trace_gc_verbose) { |
| 113 PrintIsolate(heap()->isolate(), "Memory reducer: finished GC #%d (%s)\n", | 115 heap()->isolate()->PrintWithTimestamp( |
| 114 state_.started_gcs, | 116 "Memory reducer: finished GC #%d (%s)\n", state_.started_gcs, |
| 115 state_.action == kWait ? "will do more" : "done"); | 117 state_.action == kWait ? "will do more" : "done"); |
| 116 } | 118 } |
| 117 } | 119 } |
| 118 } | 120 } |
| 119 | 121 |
| 120 void MemoryReducer::NotifyPossibleGarbage(const Event& event) { | 122 void MemoryReducer::NotifyPossibleGarbage(const Event& event) { |
| 121 DCHECK_EQ(kPossibleGarbage, event.type); | 123 DCHECK_EQ(kPossibleGarbage, event.type); |
| 122 Action old_action = state_.action; | 124 Action old_action = state_.action; |
| 123 state_ = Step(state_, event); | 125 state_ = Step(state_, event); |
| 124 if (old_action != kWait && state_.action == kWait) { | 126 if (old_action != kWait && state_.action == kWait) { |
| 125 // If we are transitioning to the WAIT state, start the timer. | 127 // If we are transitioning to the WAIT state, start the timer. |
| (...skipping 75 matching lines...) | (...skipping 75 matching lines...) |
| 201 v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap()->isolate()); | 203 v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap()->isolate()); |
| 202 auto timer_task = new MemoryReducer::TimerTask(this); | 204 auto timer_task = new MemoryReducer::TimerTask(this); |
| 203 V8::GetCurrentPlatform()->CallDelayedOnForegroundThread( | 205 V8::GetCurrentPlatform()->CallDelayedOnForegroundThread( |
| 204 isolate, timer_task, (delay_ms + kSlackMs) / 1000.0); | 206 isolate, timer_task, (delay_ms + kSlackMs) / 1000.0); |
| 205 } | 207 } |
| 206 | 208 |
| 207 void MemoryReducer::TearDown() { state_ = State(kDone, 0, 0, 0.0); } | 209 void MemoryReducer::TearDown() { state_ = State(kDone, 0, 0, 0.0); } |
| 208 | 210 |
| 209 } // namespace internal | 211 } // namespace internal |
| 210 } // namespace v8 | 212 } // namespace v8 |
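
Context for the change above (not part of the CL): the trace lines being rewritten here sit next to the timer handler's idle heuristic, which reduces to a single predicate. The sketch below restates that predicate as standalone C++ under stated assumptions; the threshold constant, namespace, and function names are illustrative placeholders, not V8 identifiers.

// Minimal sketch (not V8 code) of the decision made in NotifyTimerTask:
// start an incremental GC when the mutator looks idle (few JS calls and a
// low allocation rate) or when the embedder asked to optimize for memory,
// e.g. for a background tab.
#include <iostream>

namespace example {

// Hypothetical threshold standing in for MemoryReducer::kJsCallsPerMsThreshold.
constexpr double kJsCallsPerMsThresholdExample = 0.5;

bool ShouldStartIncrementalGc(double js_call_rate, bool low_allocation_rate,
                              bool optimize_for_memory) {
  bool is_idle =
      js_call_rate < kJsCallsPerMsThresholdExample && low_allocation_rate;
  return is_idle || optimize_for_memory;
}

}  // namespace example

int main() {
  // Busy foreground page: many JS calls, high allocation -> no GC requested.
  std::cout << example::ShouldStartIncrementalGc(10.0, false, false) << "\n";  // 0
  // Background tab with the optimize-for-memory flag set -> GC requested.
  std::cout << example::ShouldStartIncrementalGc(10.0, false, true) << "\n";   // 1
  return 0;
}

Combining the call-rate and allocation-rate signals keeps the reducer from starting marking while a page is actively running JS, while the optimize-for-memory override lets background tabs, which may never report as fully idle, still reclaim memory.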