Chromium Code Reviews

Diff: base/trace_event/memory_dump_scheduler.cc

Issue 2799023002: memory-infra: Switch to MemoryPeakDetector and simplify MemoryDumpScheduler (Closed)
Patch Set: ssid comments + add more tests (created 3 years, 8 months ago)
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/trace_event/memory_dump_scheduler.h"

-#include "base/process/process_metrics.h"
-#include "base/single_thread_task_runner.h"
-#include "base/threading/thread_task_runner_handle.h"
-#include "base/trace_event/memory_dump_manager.h"
-#include "build/build_config.h"
+#include <limits>
+
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/threading/sequenced_task_runner_handle.h"

namespace base {
namespace trace_event {

-namespace {
-// Threshold on increase in memory from last dump beyond which a new dump must
-// be triggered.
-int64_t kDefaultMemoryIncreaseThreshold = 50 * 1024 * 1024;  // 50MiB
-const uint32_t kMemoryTotalsPollingInterval = 25;
-uint32_t g_polling_interval_ms_for_testing = 0;
-}  // namespace
-
// static
MemoryDumpScheduler* MemoryDumpScheduler::GetInstance() {
  static MemoryDumpScheduler* instance = new MemoryDumpScheduler();
  return instance;
}

-MemoryDumpScheduler::MemoryDumpScheduler() : mdm_(nullptr), is_setup_(false) {}
-MemoryDumpScheduler::~MemoryDumpScheduler() {}
-
-void MemoryDumpScheduler::Setup(
-    MemoryDumpManager* mdm,
-    scoped_refptr<SingleThreadTaskRunner> polling_task_runner) {
-  mdm_ = mdm;
-  polling_task_runner_ = polling_task_runner;
-  periodic_state_.reset(new PeriodicTriggerState);
-  polling_state_.reset(new PollingTriggerState);
-  is_setup_ = true;
+MemoryDumpScheduler::MemoryDumpScheduler() : period_ms_(0), generation_(0) {}
+MemoryDumpScheduler::~MemoryDumpScheduler() {
+  // Hit only in tests. Check that tests don't leave without stopping.
+  DCHECK(!is_enabled_for_testing());
}

-void MemoryDumpScheduler::AddTrigger(MemoryDumpType trigger_type,
-                                     MemoryDumpLevelOfDetail level_of_detail,
-                                     uint32_t min_time_between_dumps_ms) {
-  DCHECK(is_setup_);
-  if (trigger_type == MemoryDumpType::PEAK_MEMORY_USAGE) {
-    DCHECK(!periodic_state_->is_configured);
-    DCHECK_EQ(PollingTriggerState::DISABLED, polling_state_->current_state);
-    DCHECK_NE(0u, min_time_between_dumps_ms);
+void MemoryDumpScheduler::Start(
+    MemoryDumpScheduler::Config config,
+    scoped_refptr<SequencedTaskRunner> task_runner) {
+  DCHECK(!task_runner_);
+  task_runner_ = task_runner;
+  task_runner->PostTask(FROM_HERE, Bind(&MemoryDumpScheduler::StartInternal,
+                                        Unretained(this), config));
+}

-    polling_state_->level_of_detail = level_of_detail;
-    polling_state_->min_polls_between_dumps =
-        (min_time_between_dumps_ms + polling_state_->polling_interval_ms - 1) /
-        polling_state_->polling_interval_ms;
-    polling_state_->current_state = PollingTriggerState::CONFIGURED;
-  } else if (trigger_type == MemoryDumpType::PERIODIC_INTERVAL) {
-    DCHECK_EQ(PollingTriggerState::DISABLED, polling_state_->current_state);
-    periodic_state_->is_configured = true;
-    DCHECK_NE(0u, min_time_between_dumps_ms);
-    switch (level_of_detail) {
+void MemoryDumpScheduler::Stop() {
+  if (!task_runner_)
+    return;
+  task_runner_->PostTask(
+      FROM_HERE, Bind(&MemoryDumpScheduler::StopInternal, Unretained(this)));
+  task_runner_ = nullptr;
+}
+
+void MemoryDumpScheduler::StartInternal(MemoryDumpScheduler::Config config) {
+  uint32_t light_dump_period_ms = 0;
+  uint32_t heavy_dump_period_ms = 0;
+  uint32_t min_period_ms = std::numeric_limits<uint32_t>::max();
+  for (const Config::Trigger& trigger : config.triggers) {
+    DCHECK_GT(trigger.period_ms, 0u);
+    switch (trigger.level_of_detail) {
      case MemoryDumpLevelOfDetail::BACKGROUND:
        break;
      case MemoryDumpLevelOfDetail::LIGHT:
-        DCHECK_EQ(0u, periodic_state_->light_dump_period_ms);
-        periodic_state_->light_dump_period_ms = min_time_between_dumps_ms;
+        DCHECK_EQ(0u, light_dump_period_ms);
+        light_dump_period_ms = trigger.period_ms;
        break;
      case MemoryDumpLevelOfDetail::DETAILED:
-        DCHECK_EQ(0u, periodic_state_->heavy_dump_period_ms);
-        periodic_state_->heavy_dump_period_ms = min_time_between_dumps_ms;
+        DCHECK_EQ(0u, heavy_dump_period_ms);
+        heavy_dump_period_ms = trigger.period_ms;
        break;
    }
+    min_period_ms = std::min(min_period_ms, trigger.period_ms);
hjd 2017/04/11 09:42:55: #include <algorithm>?
Primiano Tucci (use gerrit) 2017/04/11 11:43:08: Good point, done. (I always find funny that in orde
+  }

-  periodic_state_->min_timer_period_ms = std::min(
-      periodic_state_->min_timer_period_ms, min_time_between_dumps_ms);
-  DCHECK_EQ(0u, periodic_state_->light_dump_period_ms %
-                    periodic_state_->min_timer_period_ms);
-  DCHECK_EQ(0u, periodic_state_->heavy_dump_period_ms %
-                    periodic_state_->min_timer_period_ms);
-  }
+  DCHECK_EQ(0u, light_dump_period_ms % min_period_ms);
+  DCHECK_EQ(0u, heavy_dump_period_ms % min_period_ms);
+  DCHECK(!config.callback.is_null());
+  callback_ = config.callback;
+  period_ms_ = min_period_ms;
+  tick_count_ = 0;
+  light_dump_rate_ = light_dump_period_ms / min_period_ms;
+  heavy_dump_rate_ = heavy_dump_period_ms / min_period_ms;
+  SequencedTaskRunnerHandle::Get()->PostTask(
+      FROM_HERE,
+      Bind(&MemoryDumpScheduler::Tick, Unretained(this), ++generation_));
}

-void MemoryDumpScheduler::EnablePeriodicTriggerIfNeeded() {
-  DCHECK(is_setup_);
-  if (!periodic_state_->is_configured || periodic_state_->timer.IsRunning())
-    return;
-  periodic_state_->light_dumps_rate = periodic_state_->light_dump_period_ms /
-                                      periodic_state_->min_timer_period_ms;
-  periodic_state_->heavy_dumps_rate = periodic_state_->heavy_dump_period_ms /
-                                      periodic_state_->min_timer_period_ms;
-
-  periodic_state_->dump_count = 0;
-  periodic_state_->timer.Start(
-      FROM_HERE,
-      TimeDelta::FromMilliseconds(periodic_state_->min_timer_period_ms),
-      Bind(&MemoryDumpScheduler::RequestPeriodicGlobalDump, Unretained(this)));
+void MemoryDumpScheduler::StopInternal() {
+  period_ms_ = 0;
+  generation_++;
+  callback_.Reset();
}

-void MemoryDumpScheduler::EnablePollingIfNeeded() {
-  DCHECK(is_setup_);
-  if (polling_state_->current_state != PollingTriggerState::CONFIGURED)
+void MemoryDumpScheduler::Tick(uint32_t expected_generation) {
+  if (period_ms_ == 0 || generation_ != expected_generation)
    return;

-  polling_state_->current_state = PollingTriggerState::ENABLED;
-  polling_state_->ResetTotals();
+  SequencedTaskRunnerHandle::Get()->PostDelayedTask(
+      FROM_HERE,
+      Bind(&MemoryDumpScheduler::Tick, Unretained(this), expected_generation),
+      TimeDelta::FromMilliseconds(period_ms_));

-  polling_task_runner_->PostTask(
-      FROM_HERE,
-      Bind(&MemoryDumpScheduler::PollMemoryOnPollingThread, Unretained(this)));
+  MemoryDumpLevelOfDetail level_of_detail = MemoryDumpLevelOfDetail::BACKGROUND;
+  if (light_dump_rate_ > 0 && tick_count_ % light_dump_rate_ == 0)
+    level_of_detail = MemoryDumpLevelOfDetail::LIGHT;
+  if (heavy_dump_rate_ > 0 && tick_count_ % heavy_dump_rate_ == 0)
+    level_of_detail = MemoryDumpLevelOfDetail::DETAILED;
+  tick_count_++;
+
+  callback_.Run(level_of_detail);
}

-void MemoryDumpScheduler::NotifyDumpTriggered() {
-  if (polling_task_runner_ &&
-      !polling_task_runner_->RunsTasksOnCurrentThread()) {
-    polling_task_runner_->PostTask(
-        FROM_HERE,
-        Bind(&MemoryDumpScheduler::NotifyDumpTriggered, Unretained(this)));
-    return;
-  }
-
-  if (!polling_state_ ||
-      polling_state_->current_state != PollingTriggerState::ENABLED) {
-    return;
-  }
-
-  polling_state_->ResetTotals();
-}
-
-void MemoryDumpScheduler::DisableAllTriggers() {
-  if (periodic_state_) {
-    if (periodic_state_->timer.IsRunning())
-      periodic_state_->timer.Stop();
-    periodic_state_.reset();
-  }
-
-  if (polling_task_runner_) {
-    DCHECK(polling_state_);
-    polling_task_runner_->PostTask(
-        FROM_HERE, Bind(&MemoryDumpScheduler::DisablePollingOnPollingThread,
-                        Unretained(this)));
-    polling_task_runner_ = nullptr;
-  }
-  is_setup_ = false;
-}
-
-void MemoryDumpScheduler::DisablePollingOnPollingThread() {
-  polling_state_->current_state = PollingTriggerState::DISABLED;
-  polling_state_.reset();
-}
-
-// static
-void MemoryDumpScheduler::SetPollingIntervalForTesting(uint32_t interval) {
-  g_polling_interval_ms_for_testing = interval;
-}
-
-bool MemoryDumpScheduler::IsPeriodicTimerRunningForTesting() {
-  return periodic_state_->timer.IsRunning();
-}
-
-void MemoryDumpScheduler::RequestPeriodicGlobalDump() {
-  MemoryDumpLevelOfDetail level_of_detail = MemoryDumpLevelOfDetail::BACKGROUND;
-  if (periodic_state_->light_dumps_rate > 0 &&
-      periodic_state_->dump_count % periodic_state_->light_dumps_rate == 0)
-    level_of_detail = MemoryDumpLevelOfDetail::LIGHT;
-  if (periodic_state_->heavy_dumps_rate > 0 &&
-      periodic_state_->dump_count % periodic_state_->heavy_dumps_rate == 0)
-    level_of_detail = MemoryDumpLevelOfDetail::DETAILED;
-  ++periodic_state_->dump_count;
-
-  mdm_->RequestGlobalDump(MemoryDumpType::PERIODIC_INTERVAL, level_of_detail);
-}
-
-void MemoryDumpScheduler::PollMemoryOnPollingThread() {
-  if (!polling_state_)
-    return;
-
-  DCHECK_EQ(PollingTriggerState::ENABLED, polling_state_->current_state);
-
-  uint64_t polled_memory = 0;
-  bool res = mdm_->PollFastMemoryTotal(&polled_memory);
-  DCHECK(res);
-  if (polling_state_->level_of_detail == MemoryDumpLevelOfDetail::DETAILED) {
-    TRACE_COUNTER1(MemoryDumpManager::kTraceCategory, "PolledMemoryMB",
-                   polled_memory / 1024 / 1024);
-  }
-
-  if (ShouldTriggerDump(polled_memory)) {
-    TRACE_EVENT_INSTANT1(MemoryDumpManager::kTraceCategory,
-                         "Peak memory dump Triggered",
-                         TRACE_EVENT_SCOPE_PROCESS, "total_usage_MB",
-                         polled_memory / 1024 / 1024);
-
-    mdm_->RequestGlobalDump(MemoryDumpType::PEAK_MEMORY_USAGE,
-                            polling_state_->level_of_detail);
-  }
-
-  // TODO(ssid): Use RequestSchedulerCallback, crbug.com/607533.
-  ThreadTaskRunnerHandle::Get()->PostDelayedTask(
-      FROM_HERE,
-      Bind(&MemoryDumpScheduler::PollMemoryOnPollingThread, Unretained(this)),
-      TimeDelta::FromMilliseconds(polling_state_->polling_interval_ms));
-}
-
-bool MemoryDumpScheduler::ShouldTriggerDump(uint64_t current_memory_total) {
-  // This function tries to detect peak memory usage as discussed in
-  // https://goo.gl/0kOU4A.
-
-  if (current_memory_total == 0)
-    return false;
-
-  bool should_dump = false;
-  ++polling_state_->num_polls_from_last_dump;
-  if (polling_state_->last_dump_memory_total == 0) {
-    // If it's first sample then trigger memory dump.
-    should_dump = true;
-  } else if (polling_state_->min_polls_between_dumps >
-             polling_state_->num_polls_from_last_dump) {
-    return false;
-  }
-
-  int64_t increase_from_last_dump =
-      current_memory_total - polling_state_->last_dump_memory_total;
-  should_dump |=
-      increase_from_last_dump > polling_state_->memory_increase_threshold;
-  should_dump |= IsCurrentSamplePeak(current_memory_total);
-  if (should_dump)
-    polling_state_->ResetTotals();
-  return should_dump;
-}
-
-bool MemoryDumpScheduler::IsCurrentSamplePeak(
-    uint64_t current_memory_total_bytes) {
-  uint64_t current_memory_total_kb = current_memory_total_bytes / 1024;
-  polling_state_->last_memory_totals_kb_index =
-      (polling_state_->last_memory_totals_kb_index + 1) %
-      PollingTriggerState::kMaxNumMemorySamples;
-  uint64_t mean = 0;
-  for (uint32_t i = 0; i < PollingTriggerState::kMaxNumMemorySamples; ++i) {
-    if (polling_state_->last_memory_totals_kb[i] == 0) {
-      // Not enough samples to detect peaks.
-      polling_state_
-          ->last_memory_totals_kb[polling_state_->last_memory_totals_kb_index] =
-          current_memory_total_kb;
-      return false;
-    }
-    mean += polling_state_->last_memory_totals_kb[i];
-  }
-  mean = mean / PollingTriggerState::kMaxNumMemorySamples;
-  uint64_t variance = 0;
-  for (uint32_t i = 0; i < PollingTriggerState::kMaxNumMemorySamples; ++i) {
-    variance += (polling_state_->last_memory_totals_kb[i] - mean) *
-                (polling_state_->last_memory_totals_kb[i] - mean);
-  }
-  variance = variance / PollingTriggerState::kMaxNumMemorySamples;
-
-  polling_state_
-      ->last_memory_totals_kb[polling_state_->last_memory_totals_kb_index] =
-      current_memory_total_kb;
-
-  // If stddev is less than 0.2% then we consider that the process is inactive.
-  bool is_stddev_low = variance < mean / 500 * mean / 500;
-  if (is_stddev_low)
-    return false;
-
-  // (mean + 3.69 * stddev) corresponds to a value that is higher than current
-  // sample with 99.99% probability.
-  return (current_memory_total_kb - mean) * (current_memory_total_kb - mean) >
-         (3.69 * 3.69 * variance);
-}
-
-MemoryDumpScheduler::PeriodicTriggerState::PeriodicTriggerState()
-    : is_configured(false),
-      dump_count(0),
-      min_timer_period_ms(std::numeric_limits<uint32_t>::max()),
-      light_dumps_rate(0),
-      heavy_dumps_rate(0),
-      light_dump_period_ms(0),
-      heavy_dump_period_ms(0) {}
-
-MemoryDumpScheduler::PeriodicTriggerState::~PeriodicTriggerState() {
-  DCHECK(!timer.IsRunning());
-}
-
-MemoryDumpScheduler::PollingTriggerState::PollingTriggerState()
-    : current_state(DISABLED),
-      level_of_detail(MemoryDumpLevelOfDetail::FIRST),
-      polling_interval_ms(g_polling_interval_ms_for_testing
-                              ? g_polling_interval_ms_for_testing
-                              : kMemoryTotalsPollingInterval),
-      min_polls_between_dumps(0),
-      num_polls_from_last_dump(-1),
-      last_dump_memory_total(0),
-      memory_increase_threshold(0),
-      last_memory_totals_kb_index(0) {}
-
-MemoryDumpScheduler::PollingTriggerState::~PollingTriggerState() {}
-
-void MemoryDumpScheduler::PollingTriggerState::ResetTotals() {
-  if (!memory_increase_threshold) {
-    memory_increase_threshold = kDefaultMemoryIncreaseThreshold;
-#if defined(OS_WIN) || defined(OS_MACOSX) || defined(OS_LINUX) || \
-    defined(OS_ANDROID)
-    // Set threshold to 1% of total system memory.
-    SystemMemoryInfoKB meminfo;
-    bool res = GetSystemMemoryInfo(&meminfo);
-    if (res) {
-      memory_increase_threshold =
-          (static_cast<int64_t>(meminfo.total) / 100) * 1024;
-    }
-    DCHECK_GT(memory_increase_threshold, 0u);
-#endif
-  }
-
-  // Update the |last_dump_memory_total|'s value from the totals if it's not
-  // first poll.
-  if (num_polls_from_last_dump >= 0 &&
-      last_memory_totals_kb[last_memory_totals_kb_index]) {
-    last_dump_memory_total =
-        last_memory_totals_kb[last_memory_totals_kb_index] * 1024;
-  }
-  num_polls_from_last_dump = 0;
-  for (uint32_t i = 0; i < kMaxNumMemorySamples; ++i)
-    last_memory_totals_kb[i] = 0;
-  last_memory_totals_kb_index = 0;
-}
+MemoryDumpScheduler::Config::Config() {}
+MemoryDumpScheduler::Config::~Config() {}
+MemoryDumpScheduler::Config::Config(const MemoryDumpScheduler::Config&) =
+    default;

}  // namespace trace_event
}  // namespace base
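
For reference, a minimal sketch of how a client could drive the new Start()/Stop() interface shown above. Config's full declaration lives in memory_dump_scheduler.h, which is not part of this diff, so the container type behind config.triggers, the field initialization, and the callback signature below are assumptions inferred from this .cc; the helper names (OnPeriodicDumpTick, StartExampleScheduler) and the 250 ms / 2000 ms periods are made up for illustration only.

// Sketch only; not part of this patch. Assumes config.triggers is a
// std::vector<Config::Trigger> and config.callback is a
// base::Callback<void(MemoryDumpLevelOfDetail)>, as suggested by the .cc above.
#include "base/bind.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/memory_dump_scheduler.h"

using base::trace_event::MemoryDumpLevelOfDetail;
using base::trace_event::MemoryDumpScheduler;

void OnPeriodicDumpTick(MemoryDumpLevelOfDetail level_of_detail) {
  // A real client would request a global memory dump here (e.g. through
  // MemoryDumpManager); omitted because that wiring is outside this patch.
}

void StartExampleScheduler() {
  MemoryDumpScheduler::Config config;

  // One LIGHT dump every 250 ms and one DETAILED dump every 2000 ms
  // (illustrative values).
  MemoryDumpScheduler::Config::Trigger light_trigger;
  light_trigger.level_of_detail = MemoryDumpLevelOfDetail::LIGHT;
  light_trigger.period_ms = 250;
  config.triggers.push_back(light_trigger);

  MemoryDumpScheduler::Config::Trigger detailed_trigger;
  detailed_trigger.level_of_detail = MemoryDumpLevelOfDetail::DETAILED;
  detailed_trigger.period_ms = 2000;
  config.triggers.push_back(detailed_trigger);

  config.callback = base::Bind(&OnPeriodicDumpTick);

  // StartInternal() derives min_period_ms = 250, light_dump_rate_ = 1 and
  // heavy_dump_rate_ = 8, so Tick() runs every 250 ms, emits at least a LIGHT
  // dump on every tick, and upgrades every 8th tick to DETAILED.
  MemoryDumpScheduler::GetInstance()->Start(
      config, base::ThreadTaskRunnerHandle::Get());
}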