Chromium Code Reviews

// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/trace_event/memory_dump_trigger.h"

#include <stdint.h>

#include <algorithm>
#include <limits>

#include "base/command_line.h"
#include "base/process/process_metrics.h"
#include "base/single_thread_task_runner.h"
#include "base/trace_event/memory_dump_manager.h"
#include "build/build_config.h"

namespace base {
namespace trace_event {

namespace {
// For triggering periodic dumps:
uint32_t g_periodic_dumps_count;

Primiano Tucci (use gerrit)  2017/01/18 16:16:06
Why are these globals and not instance fields? Don

ssid  2017/01/20 23:07:27
I changed all of these to instance field of specif
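
As a reading aid for the thread above, a minimal sketch of what moving this anonymous-namespace state into instance fields could look like; the struct names and grouping below are assumptions for illustration, not taken from the actual follow-up patch.

#include <stdint.h>

// Sketch only (hypothetical): groups the anonymous-namespace globals into
// per-instance state that MemoryDumpTrigger could own instead.
struct PeriodicTriggerState {
  uint32_t periodic_dumps_count = 0;  // was g_periodic_dumps_count
  uint32_t light_dump_rate = 0;       // was g_light_dump_rate
  uint32_t heavy_dump_rate = 0;       // was g_heavy_dump_rate
};

struct PeakTriggerState {
  uint64_t last_dump_memory_total = 0;                  // was g_last_dump_memory_total
  int64_t memory_increase_threshold = 50 * 1024 * 1024; // was g_memory_increase_threshold
  int min_polls_between_dumps = 0;                      // was g_min_polls_between_dumps
  int num_polls_from_last_dump = 0;                     // was g_num_polls_from_last_dump
};
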
uint32_t g_light_dump_rate;
uint32_t g_heavy_dump_rate;

// For triggering peak dumps:
const uint32_t kMemoryTotalsPollingInterval = 25;
uint32_t g_polling_interval_for_testing = 0;

// Threshold on the increase in memory since the last dump beyond which a new
// dump must be triggered.
int64_t g_memory_increase_threshold = 50 * 1024 * 1024;  // 50 MiB
uint64_t g_last_dump_memory_total = 0;

MemoryDumpLevelOfDetail g_peak_dumps_mode;
int g_min_polls_between_dumps = 0;
int g_num_polls_from_last_dump = 0;

bool ShouldTriggerDump(uint64_t current_memory_total) {
  if (current_memory_total == 0)
    return false;

  bool should_dump = false;
  ++g_num_polls_from_last_dump;
  if (g_last_dump_memory_total == 0) {
    // If this is the first sample, trigger a memory dump.
    should_dump |= true;
  } else if (g_min_polls_between_dumps > g_num_polls_from_last_dump) {
    return false;
  }

  int64_t increase_from_last_dump =
      current_memory_total - g_last_dump_memory_total;
  should_dump |= increase_from_last_dump > g_memory_increase_threshold;
  if (should_dump) {
    g_last_dump_memory_total = current_memory_total;
    g_num_polls_from_last_dump = 0;
    TRACE_EVENT_INSTANT1(MemoryDumpManager::kTraceCategory,
                         "Peak memory dump Triggered",
                         TRACE_EVENT_SCOPE_PROCESS, "total_usage_MB",
                         current_memory_total / 1024 / 1024);
  }
  return should_dump;
}

}  // namespace

MemoryDumpTrigger::MemoryDumpTrigger(MemoryDumpManager* mdm,
                                     bool is_coordinator)
    : mdm_(mdm), is_coordinator_(is_coordinator), polling_enabled_(0) {
#if defined(OS_WIN) || defined(OS_MACOSX) || defined(OS_LINUX) || \
    defined(OS_ANDROID)
  // Set threshold to 1% of total system memory.
  SystemMemoryInfoKB meminfo;
  bool res = GetSystemMemoryInfo(&meminfo);
  if (res)
    g_memory_increase_threshold = (meminfo.total / 100) * 1024;
#endif
}
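
For context on the arithmetic above: SystemMemoryInfoKB reports totals in KiB, so dividing by 100 and multiplying by 1024 yields 1% of physical memory in bytes. A small self-contained check, using an assumed 8 GiB machine as the example value:

#include <assert.h>
#include <stdint.h>

int main() {
  // Assumed example: a machine with 8 GiB of RAM, reported in KiB.
  int64_t total_kib = 8LL * 1024 * 1024;
  // Same arithmetic as the constructor: 1% of total memory, in bytes.
  int64_t threshold_bytes = (total_kib / 100) * 1024;
  assert(threshold_bytes == 85899264);  // roughly 82 MiB
  return 0;
}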

MemoryDumpTrigger::~MemoryDumpTrigger() {
  DCHECK(!polling_task_runner_);
  Disable();
}

void MemoryDumpTrigger::SetupPeakTriggers(
    const TriggerList& trigger_list,
    scoped_refptr<SingleThreadTaskRunner> polling_task_runner) {
  if (subtle::NoBarrier_Load(&polling_enabled_))
    return;
  if (trigger_list.size() != 1 ||
      trigger_list.begin()->trigger_type != MemoryDumpType::PEAK_MEMORY_USAGE) {
    return;
  }

  DCHECK(polling_task_runner);
  const auto& peak_trigger = *trigger_list.begin();
  DCHECK_NE(0u, peak_trigger.min_time_between_dumps_ms);

  g_peak_dumps_mode = peak_trigger.level_of_detail;
  uint32_t polling_interval = g_polling_interval_for_testing
                                  ? g_polling_interval_for_testing
                                  : kMemoryTotalsPollingInterval;
  g_min_polls_between_dumps =
      (peak_trigger.min_time_between_dumps_ms + polling_interval - 1) /
      polling_interval;
  g_num_polls_from_last_dump = 0;
  g_last_dump_memory_total = 0;
  subtle::NoBarrier_Store(&polling_enabled_, 1);
  DCHECK(!polling_task_runner_);
  polling_task_runner_ = polling_task_runner;
  polling_task_runner->PostTask(
      FROM_HERE,
      Bind(&MemoryDumpTrigger::PollMemoryOnPollingThread, Unretained(this)));
  return;
}
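
To make the scheduling math above concrete: the division is a rounding-up (ceiling) division, so a peak dump is considered at most once per configured interval. A small check, with 2000 ms as an assumed min_time_between_dumps_ms value:

#include <assert.h>
#include <stdint.h>

int main() {
  const uint32_t kMemoryTotalsPollingInterval = 25;  // ms, as in this file.
  uint32_t min_time_between_dumps_ms = 2000;         // assumed trigger config.
  // Same rounding-up division as SetupPeakTriggers().
  uint32_t min_polls_between_dumps =
      (min_time_between_dumps_ms + kMemoryTotalsPollingInterval - 1) /
      kMemoryTotalsPollingInterval;
  assert(min_polls_between_dumps == 80u);  // at most one candidate dump per 80 polls.
  return 0;
}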

void MemoryDumpTrigger::SetupPeriodicTriggers(const TriggerList& trigger_list) {
  if (trigger_list.empty() || !is_coordinator_)
    return;

  // At the moment the periodic support is limited to at most one periodic
  // trigger per dump mode. All intervals should be an integer multiple of the
  // smallest interval specified.
  g_periodic_dumps_count = 0;
  uint32_t min_timer_period_ms = std::numeric_limits<uint32_t>::max();
  uint32_t light_dump_period_ms = 0;
  uint32_t heavy_dump_period_ms = 0;
  DCHECK_LE(trigger_list.size(), 3u);
  auto* mdm = mdm_;
  for (const TraceConfig::MemoryDumpConfig::Trigger& config : trigger_list) {
    if (config.trigger_type != MemoryDumpType::PERIODIC_INTERVAL)
      continue;
    DCHECK_NE(0u, config.min_time_between_dumps_ms);
    switch (config.level_of_detail) {
      case MemoryDumpLevelOfDetail::BACKGROUND:
        DCHECK(mdm->IsDumpModeAllowed(MemoryDumpLevelOfDetail::BACKGROUND));
        break;
      case MemoryDumpLevelOfDetail::LIGHT:
        DCHECK_EQ(0u, light_dump_period_ms);
        DCHECK(mdm->IsDumpModeAllowed(MemoryDumpLevelOfDetail::LIGHT));
        light_dump_period_ms = config.min_time_between_dumps_ms;
        break;
      case MemoryDumpLevelOfDetail::DETAILED:
        DCHECK_EQ(0u, heavy_dump_period_ms);
        DCHECK(mdm->IsDumpModeAllowed(MemoryDumpLevelOfDetail::DETAILED));
        heavy_dump_period_ms = config.min_time_between_dumps_ms;
        break;
    }
    min_timer_period_ms =
        std::min(min_timer_period_ms, config.min_time_between_dumps_ms);
  }

  DCHECK_EQ(0u, light_dump_period_ms % min_timer_period_ms);
  g_light_dump_rate = light_dump_period_ms / min_timer_period_ms;
  DCHECK_EQ(0u, heavy_dump_period_ms % min_timer_period_ms);
  g_heavy_dump_rate = heavy_dump_period_ms / min_timer_period_ms;

  timer_.Start(
      FROM_HERE, TimeDelta::FromMilliseconds(min_timer_period_ms),
      Bind(&MemoryDumpTrigger::RequestPeriodicGlobalDump, Unretained(this)));
}
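
To illustrate how the rates computed above drive RequestPeriodicGlobalDump(): with an assumed configuration of light dumps every 250 ms and detailed dumps every 2000 ms, the timer fires every 250 ms, every tick yields at least a light dump, and every 8th tick is upgraded to a detailed dump. A small check of that behavior (the periods are illustrative assumptions):

#include <assert.h>
#include <stdint.h>

int main() {
  // Assumed periodic trigger config: LIGHT every 250 ms, DETAILED every
  // 2000 ms; the timer then fires at the smaller period (250 ms).
  const uint32_t min_timer_period_ms = 250;
  const uint32_t light_dump_rate = 250 / min_timer_period_ms;   // 1: every tick.
  const uint32_t heavy_dump_rate = 2000 / min_timer_period_ms;  // 8: every 8th tick.
  assert(light_dump_rate == 1u);
  assert(heavy_dump_rate == 8u);

  // With the modulo checks in RequestPeriodicGlobalDump(), ticks 0, 8, 16, ...
  // get DETAILED dumps and all other ticks get LIGHT dumps.
  for (uint32_t count = 0; count < 16; ++count) {
    bool detailed = (count % heavy_dump_rate == 0);
    assert(detailed == (count == 0 || count == 8));
  }
  return 0;
}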

void MemoryDumpTrigger::Disable() {
  if (timer_.IsRunning())
    timer_.Stop();
  subtle::NoBarrier_Store(&polling_enabled_, 0);
  polling_task_runner_ = nullptr;
}

bool MemoryDumpTrigger::IsPeriodicDumpTimerRunning() const {
  return timer_.IsRunning();
}

// static
void MemoryDumpTrigger::SetPollingIntervalForTesting(uint32_t interval) {
  g_polling_interval_for_testing = interval;
}

void MemoryDumpTrigger::RequestPeriodicGlobalDump() {
  MemoryDumpLevelOfDetail level_of_detail = MemoryDumpLevelOfDetail::BACKGROUND;
  if (g_light_dump_rate > 0 && g_periodic_dumps_count % g_light_dump_rate == 0)
    level_of_detail = MemoryDumpLevelOfDetail::LIGHT;
  if (g_heavy_dump_rate > 0 && g_periodic_dumps_count % g_heavy_dump_rate == 0)
    level_of_detail = MemoryDumpLevelOfDetail::DETAILED;
  ++g_periodic_dumps_count;

  mdm_->RequestGlobalDump(MemoryDumpType::PERIODIC_INTERVAL, level_of_detail);
}

void MemoryDumpTrigger::PollMemoryOnPollingThread() {
  if (!subtle::NoBarrier_Load(&polling_enabled_)) {
    polling_task_runner_ = nullptr;
    return;
  }

  uint64_t metric = 0;
  bool res = mdm_->PollFastMemoryTotal(&metric);
  DCHECK(res);
  if (g_peak_dumps_mode == MemoryDumpLevelOfDetail::DETAILED) {
    TRACE_COUNTER1(MemoryDumpManager::kTraceCategory, "MemoryMetricMB",
                   metric / 1024 / 1024);
  }

  if (ShouldTriggerDump(metric)) {
    mdm_->RequestGlobalDump(MemoryDumpType::PEAK_MEMORY_USAGE,
                            g_peak_dumps_mode);
  }

  // TODO(ssid): Use RequestSchedulerCallback, crbug.com/607533.
  uint32_t polling_interval = g_polling_interval_for_testing
                                  ? g_polling_interval_for_testing
                                  : kMemoryTotalsPollingInterval;
  polling_task_runner_->PostDelayedTask(
      FROM_HERE,
      Bind(&MemoryDumpTrigger::PollMemoryOnPollingThread, Unretained(this)),
      TimeDelta::FromMilliseconds(polling_interval));
}

}  // namespace trace_event
}  // namespace base