Index: base/trace_event/memory_peak_detector.cc
diff --git a/base/trace_event/memory_peak_detector.cc b/base/trace_event/memory_peak_detector.cc
index c361037c2d800540edbcf72ee2bbd415e1d69fab..2423fa3361795846dc0e30b97dd768c4373f0d3f 100644
--- a/base/trace_event/memory_peak_detector.cc
+++ b/base/trace_event/memory_peak_detector.cc
@@ -4,13 +4,17 @@
 #include "base/trace_event/memory_peak_detector.h"
-#include <stdint.h>
+#include <algorithm>
 #include "base/bind.h"
 #include "base/logging.h"
+#include "base/sys_info.h"
 #include "base/threading/sequenced_task_runner_handle.h"
 #include "base/time/time.h"
+#include "base/trace_event/memory_dump_manager.h"
 #include "base/trace_event/memory_dump_provider_info.h"
+#include "base/trace_event/trace_event.h"
+#include "build/build_config.h"
 namespace base {
 namespace trace_event {
@@ -24,7 +28,6 @@ MemoryPeakDetector* MemoryPeakDetector::GetInstance() {
 MemoryPeakDetector::MemoryPeakDetector()
     : generation_(0),
       state_(NOT_INITIALIZED),
-      polling_interval_ms_(0),
       poll_tasks_count_for_testing_(0) {}
 MemoryPeakDetector::~MemoryPeakDetector() {
@@ -46,6 +49,19 @@ void MemoryPeakDetector::Setup(
   task_runner_ = task_runner;
   on_peak_detected_callback_ = on_peak_detected_callback;
   state_ = DISABLED;
+  config_ = {};
+  ResetPollHistory();
+
+  static_threshold_bytes_ = 0;
+#if !defined(OS_NACL)
+  // Set threshold to 1% of total system memory.
+  static_threshold_bytes_ =
+      static_cast<uint64_t>(SysInfo::AmountOfPhysicalMemory()) / 100;
+#endif
+  // Fallback, mostly for test environments where AmountOfPhysicalMemory() is
+  // broken.
+  static_threshold_bytes_ =
+      std::max(static_threshold_bytes_, static_cast<uint64_t>(5 * 1024 * 1024));
 }
 void MemoryPeakDetector::TearDown() {
@@ -57,9 +73,13 @@ void MemoryPeakDetector::TearDown() {
   task_runner_ = nullptr;
 }
-void MemoryPeakDetector::Start() {
-  task_runner_->PostTask(
-      FROM_HERE, Bind(&MemoryPeakDetector::StartInternal, Unretained(this)));
+void MemoryPeakDetector::Start(MemoryPeakDetector::Config config) {
+  if (!config.polling_interval_ms) {
+    NOTREACHED();
+    return;
+  }
+  task_runner_->PostTask(FROM_HERE, Bind(&MemoryPeakDetector::StartInternal,
+                                         Unretained(this), config));
 }
 void MemoryPeakDetector::Stop() {
@@ -67,28 +87,36 @@ void MemoryPeakDetector::Stop() {
       FROM_HERE, Bind(&MemoryPeakDetector::StopInternal, Unretained(this)));
 }
+void MemoryPeakDetector::Throttle() {
+  if (!task_runner_)
+    return;  // Can be called before Setup().
+  task_runner_->PostTask(
+      FROM_HERE, Bind(&MemoryPeakDetector::ResetPollHistory, Unretained(this),
+                      true /* keep_last_sample */));
+}
+
 void MemoryPeakDetector::NotifyMemoryDumpProvidersChanged() {
-  // It is possible to call this before the first Setup() call, in which case
-  // we want to just make this a noop. The next Start() will fetch the MDP list.
   if (!task_runner_)
-    return;
+    return;  // Can be called before Setup().
   task_runner_->PostTask(
       FROM_HERE,
       Bind(&MemoryPeakDetector::ReloadDumpProvidersAndStartPollingIfNeeded,
            Unretained(this)));
 }
-void MemoryPeakDetector::StartInternal() {
+void MemoryPeakDetector::StartInternal(MemoryPeakDetector::Config config) {
   DCHECK_EQ(DISABLED, state_);
   state_ = ENABLED;
-  polling_interval_ms_ = 1;  // TODO(primiano): temporary until next CL.
+  config_ = config;
+  ResetPollHistory();
-  // If there are any dump providers available, NotifyMemoryDumpProvidersChanged
-  // will fetch them and start the polling. Otherwise this will remain in the
-  // ENABLED state and the actual polling will start on the next call to
+  // If there are any dump providers available,
+  // NotifyMemoryDumpProvidersChanged will fetch them and start the polling.
+  // Otherwise this will remain in the ENABLED state and the actual polling
+  // will start on the next call to
   // ReloadDumpProvidersAndStartPollingIfNeeded().
-  // Depending on the sandbox model, it is possible that no polling-capable dump
-  // providers will be ever available.
+  // Depending on the sandbox model, it is possible that no polling-capable
+  // dump providers will ever be available.
   ReloadDumpProvidersAndStartPollingIfNeeded();
 }
@@ -132,7 +160,7 @@ void MemoryPeakDetector::ReloadDumpProvidersAndStartPollingIfNeeded() {
 }
 void MemoryPeakDetector::PollMemoryAndDetectPeak(uint32_t expected_generation) {
-  if (state_ != RUNNING || expected_generation != generation_)
+  if (state_ != RUNNING || generation_ != expected_generation)
     return;
   // We should never end up in a situation where state_ == RUNNING but all dump
@@ -140,24 +168,107 @@ void MemoryPeakDetector::PollMemoryAndDetectPeak(uint32_t expected_generation) {
   DCHECK(!dump_providers_.empty());
   poll_tasks_count_for_testing_++;
-  uint64_t memory_total = 0;
+  uint64_t polled_mem_bytes = 0;
   for (const scoped_refptr<MemoryDumpProviderInfo>& mdp_info :
        dump_providers_) {
     DCHECK(mdp_info->options.is_fast_polling_supported);
     uint64_t value = 0;
     mdp_info->dump_provider->PollFastMemoryTotal(&value);
-    memory_total += value;
+    polled_mem_bytes += value;
   }
-  ignore_result(memory_total);  // TODO(primiano): temporary until next CL.
+  if (config_.enable_verbose_poll_tracing) {
+    TRACE_COUNTER1(MemoryDumpManager::kTraceCategory, "PolledMemoryMB",
+                   polled_mem_bytes / 1024 / 1024);
+  }
+
+  // Peak detection logic. Design doc: https://goo.gl/0kOU4A .
+  bool is_peak = false;
+  if (skip_polls_ > 0) {
+    skip_polls_--;
+  } else if (last_dump_memory_total_ == 0) {
+    last_dump_memory_total_ = polled_mem_bytes;
+  } else if (polled_mem_bytes > 0) {
+    int64_t diff_from_last_dump = polled_mem_bytes - last_dump_memory_total_;
-  // TODO(primiano): Move actual peak detection logic from the
-  // MemoryDumpScheduler in next CLs.
+    DCHECK_GT(static_threshold_bytes_, 0u);
+    is_peak =
+        diff_from_last_dump > static_cast<int64_t>(static_threshold_bytes_);
+    if (!is_peak)
+      is_peak = DetectPeakUsingSlidingWindowStddev(polled_mem_bytes);
+  }
+
+  DCHECK_GT(config_.polling_interval_ms, 0u);
   SequencedTaskRunnerHandle::Get()->PostDelayedTask(
       FROM_HERE,
       Bind(&MemoryPeakDetector::PollMemoryAndDetectPeak, Unretained(this),
            expected_generation),
-      TimeDelta::FromMilliseconds(polling_interval_ms_));
+      TimeDelta::FromMilliseconds(config_.polling_interval_ms));
+
+  if (!is_peak)
+    return;
+  TRACE_EVENT_INSTANT1(MemoryDumpManager::kTraceCategory,
+                       "Peak memory detected", TRACE_EVENT_SCOPE_PROCESS,
+                       "PolledMemoryMB", polled_mem_bytes / 1024 / 1024);
+  ResetPollHistory(true /* keep_last_sample */);
+  last_dump_memory_total_ = polled_mem_bytes;
+  on_peak_detected_callback_.Run();
+}
+
+bool MemoryPeakDetector::DetectPeakUsingSlidingWindowStddev(
+    uint64_t polled_mem_bytes) {
+  DCHECK(polled_mem_bytes);
+  samples_bytes_[samples_index_] = polled_mem_bytes;
+  samples_index_ = (samples_index_ + 1) % kSlidingWindowNumSamples;
+  float mean = 0;
+  for (uint32_t i = 0; i < kSlidingWindowNumSamples; ++i) {
+    if (samples_bytes_[i] == 0)
+      return false;  // Not enough samples to detect peaks.
+    mean += samples_bytes_[i];
+  }
+  mean /= kSlidingWindowNumSamples;
+  float variance = 0;
+  for (uint32_t i = 0; i < kSlidingWindowNumSamples; ++i) {
+    const float deviation = samples_bytes_[i] - mean;
+    variance += deviation * deviation;
+  }
+  variance /= kSlidingWindowNumSamples;
+
+  // If stddev is less than 0.2% of the mean, consider the process inactive.
+  if (variance < (mean / 500) * (mean / 500))
+    return false;
+
+  // 3.69 stddevs above the mean is exceeded with only ~0.01% probability in a
+  // normal distribution, so treat such a sample as a peak.
+  const float cur_sample_deviation = polled_mem_bytes - mean;
+  return cur_sample_deviation * cur_sample_deviation > (3.69 * 3.69 * variance);
+}
+
+void MemoryPeakDetector::ResetPollHistory(bool keep_last_sample) {
+  // TODO(primiano,ssid): this logic should probably be revisited. In the case
+  // of Android, the browser process sees the total of all processes' memory in
+  // the same peak detector instance. Perhaps the best thing to do here is to
+  // keep the window of samples around and just bump the skip_polls_.
+  last_dump_memory_total_ = 0;
+  if (keep_last_sample) {
+    const uint32_t prev_index =
+        samples_index_ > 0 ? samples_index_ - 1 : kSlidingWindowNumSamples - 1;
+    last_dump_memory_total_ = samples_bytes_[prev_index];
+  }
+  memset(samples_bytes_, 0, sizeof(samples_bytes_));
+  samples_index_ = 0;
+  skip_polls_ = 0;
+  if (config_.polling_interval_ms > 0) {
+    skip_polls_ =
+        (config_.min_time_between_peaks_ms + config_.polling_interval_ms - 1) /
+        config_.polling_interval_ms;
+  }
+}
+
+void MemoryPeakDetector::SetStaticThresholdForTesting(
+    uint64_t static_threshold_bytes) {
+  DCHECK_EQ(DISABLED, state_);
+  static_threshold_bytes_ = static_threshold_bytes;
+}
 }  // namespace trace_event
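
The peak-detection math added above can be exercised in isolation. The following is a minimal standalone sketch (not Chromium code) of the two mechanisms this patch introduces: the sliding-window stddev check mirroring DetectPeakUsingSlidingWindowStddev(), and the ceiling-division skip_polls_ computation from ResetPollHistory(). The window size of 50 samples, the names PeakModel and SkipPollsFor, and the example values (25 ms polling interval, 2000 ms between peaks) are illustrative assumptions; the real constants live in memory_peak_detector.h, which is not part of this diff.

// peak_detector_sketch.cc - illustrative standalone model, not Chromium code.
#include <cstdint>
#include <cstdio>

namespace {

// Assumed window size; the real kSlidingWindowNumSamples is declared in
// base/trace_event/memory_peak_detector.h and is not visible in this diff.
constexpr uint32_t kWindowSamples = 50;

class PeakModel {
 public:
  // Mirrors DetectPeakUsingSlidingWindowStddev(): store the sample in a ring
  // buffer, then report a peak when it deviates from the window mean by more
  // than 3.69 stddevs, unless the window is not yet full or the process looks
  // inactive (stddev below 0.2% of the mean).
  bool OnSample(uint64_t polled_mem_bytes) {
    samples_[index_] = polled_mem_bytes;
    index_ = (index_ + 1) % kWindowSamples;
    float mean = 0;
    for (uint64_t sample : samples_) {
      if (sample == 0)
        return false;  // Not enough samples yet.
      mean += sample;
    }
    mean /= kWindowSamples;
    float variance = 0;
    for (uint64_t sample : samples_) {
      const float deviation = sample - mean;
      variance += deviation * deviation;
    }
    variance /= kWindowSamples;
    if (variance < (mean / 500) * (mean / 500))
      return false;  // stddev < 0.2% of the mean: treat the process as idle.
    const float deviation = polled_mem_bytes - mean;
    return deviation * deviation > 3.69f * 3.69f * variance;
  }

 private:
  uint64_t samples_[kWindowSamples] = {};
  uint32_t index_ = 0;
};

// Mirrors the skip_polls_ computation in ResetPollHistory(): how many polls to
// skip so that no new peak is reported for min_time_between_peaks_ms.
uint32_t SkipPollsFor(uint32_t min_time_between_peaks_ms,
                      uint32_t polling_interval_ms) {
  return (min_time_between_peaks_ms + polling_interval_ms - 1) /
         polling_interval_ms;  // Ceiling division.
}

}  // namespace

int main() {
  PeakModel model;
  const uint64_t kMB = 1024 * 1024;
  // Fill the window with a ~100 MB baseline plus small jitter (the jitter
  // keeps the stddev above the 0.2% "inactive" floor).
  bool false_peak = false;
  for (uint32_t i = 0; i < 60; i++)
    false_peak |= model.OnSample((100 + i % 3) * kMB);
  printf("false peak on flat baseline: %d\n", false_peak);  // Expect 0.
  printf("peak on jump to 200 MB: %d\n", model.OnSample(200 * kMB));  // Expect 1.
  // E.g. 2000 ms between peaks at a 25 ms polling interval -> skip 80 polls.
  printf("skip_polls = %u\n", SkipPollsFor(2000, 25));
  return 0;
}

Note that, as in the patch, the sample under test is inserted into the window before the mean and variance are computed, so it also contributes to the variance it is compared against; large absolute jumps are expected to be caught first by the static threshold of 1% of physical memory, which the patch checks before falling back to the sliding-window test.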