Index: chrome/browser/metrics/subprocess_metrics_provider.cc |
diff --git a/chrome/browser/metrics/subprocess_metrics_provider.cc b/chrome/browser/metrics/subprocess_metrics_provider.cc |
index 6d0365d58c7da0ad838ffc4057c5018ac164830b..eea759a7c6886d320e7d86cc07f2ab394762d368 100644 |
--- a/chrome/browser/metrics/subprocess_metrics_provider.cc |
+++ b/chrome/browser/metrics/subprocess_metrics_provider.cc |
@@ -7,68 +7,118 @@ |
#include "base/logging.h" |
#include "base/memory/ptr_util.h" |
#include "base/metrics/histogram_base.h" |
+#include "base/metrics/histogram_flattener.h" |
#include "base/metrics/histogram_macros.h" |
+#include "base/metrics/histogram_samples.h" |
#include "base/metrics/persistent_histogram_allocator.h" |
#include "base/metrics/persistent_memory_allocator.h" |
+#include "base/metrics/statistics_recorder.h" |
+#include "base/stl_util.h" |
+#include "base/strings/stringprintf.h" |
#include "components/metrics/metrics_service.h" |
#include "content/public/browser/notification_service.h" |
#include "content/public/browser/notification_types.h" |
#include "content/public/browser/render_process_host.h" |
+namespace { |
+ |
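+// Renders each snapshotted histogram as an ASCII or HTML graph, keyed by |
+// histogram name, for the on-demand histogram display (see WriteGraphs() |
+// below). |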
+class HistogramOutputFlattener : public base::HistogramFlattener { |
+ public: |
+ explicit HistogramOutputFlattener(bool html) : html_(html) {} |
+ |
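+  // Invoked by HistogramSnapshotManager for each histogram being |
+  // snapshotted; appends that histogram's rendered graph. |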
+ void RecordDelta(const base::HistogramBase& histogram, |
+ const base::HistogramSamples& snapshot) override { |
+ std::string& graph = graphs_[histogram.histogram_name()]; |
+ if (html_) { |
+ histogram.WriteHTMLGraph(&snapshot, &graph); |
+ graph.append("<br><hr><br>"); |
+ } else { |
+ histogram.WriteAscii(&snapshot, &graph); |
+ graph.append("\n"); |
+ } |
+ } |
+ |
+ void InconsistencyDetected( |
+ base::HistogramBase::Inconsistency problem) override {} |
+ |
+ void UniqueInconsistencyDetected( |
+ base::HistogramBase::Inconsistency problem) override {} |
+ |
+ void InconsistencyDetectedInLoggedCount(int amount) override {} |
+ |
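+  // Concatenates the rendered graphs, in histogram-name order. |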
+ void WriteOutput(std::string* output) { |
+ for (auto& name_and_graph : graphs_) { |
+ *output += name_and_graph.second; |
+ } |
+ } |
+ |
+ private: |
+ bool html_; |
+ |
+  // Map from histogram name to that histogram's rendered output. A std::map |
+  // is used so the graphs are emitted in alphabetical order by name. |
+  std::map<std::string, std::string> graphs_; |
+ |
+ DISALLOW_COPY_AND_ASSIGN(HistogramOutputFlattener); |
+}; |
+ |
+} // namespace |
+ |
SubprocessMetricsProvider::SubprocessMetricsProvider() |
: scoped_observer_(this) { |
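+  // Register with the StatisticsRecorder so that subprocess histograms can |
+  // be shown by on-demand displays via WriteTitleString() and WriteGraphs() |
+  // below, in addition to being reported through normal UMA snapshots. |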
+ base::StatisticsRecorder::RegisterMetricsDisplay(this); |
registrar_.Add(this, content::NOTIFICATION_RENDERER_PROCESS_CREATED, |
content::NotificationService::AllBrowserContextsAndSources()); |
} |
-SubprocessMetricsProvider::~SubprocessMetricsProvider() {} |
+SubprocessMetricsProvider::~SubprocessMetricsProvider() { |
+ base::StatisticsRecorder::DeregisterMetricsDisplay(this); |
+} |
void SubprocessMetricsProvider::RegisterSubprocessAllocator( |
int id, |
std::unique_ptr<base::PersistentHistogramAllocator> allocator) { |
- DCHECK(thread_checker_.CalledOnValidThread()); |
- DCHECK(!allocators_by_id_.Lookup(id)); |
+  base::AutoLock lock(lock_); |
+  DCHECK(!ContainsKey(allocators_by_id_, id)); |
+ |
- // Map is "MapOwnPointer" so transfer ownership to it. |
- allocators_by_id_.AddWithID(allocator.release(), id); |
+ // Insert has to be done with a pair in order to support "move" semantics. |
+ allocators_by_id_.insert(std::make_pair(id, std::move(allocator))); |
} |
void SubprocessMetricsProvider::DeregisterSubprocessAllocator(int id) { |
- DCHECK(thread_checker_.CalledOnValidThread()); |
+ base::AutoLock lock(lock_); |
- if (!allocators_by_id_.Lookup(id)) |
+ auto existing = allocators_by_id_.find(id); |
+ if (existing == allocators_by_id_.end()) |
return; |
- // Extract the matching allocator from the list of active ones. |
- std::unique_ptr<base::PersistentHistogramAllocator> allocator( |
- allocators_by_id_.Replace(id, nullptr)); |
- allocators_by_id_.Remove(id); |
- DCHECK(allocator); |
- |
// If metrics recording is enabled, transfer the allocator to the "release" |
// list. The allocator will continue to live (and keep the associated shared |
// memory alive) until the next upload after which it will be released. |
// Otherwise, the allocator and its memory will be released when the |
// unique_ptr goes out of scope at the end of this method. |
if (metrics_recording_enabled_) |
- allocators_to_release_.push_back(std::move(allocator)); |
+ allocators_to_release_.push_back(std::move(existing->second)); |
+ |
+ allocators_by_id_.erase(existing); |
} |
void SubprocessMetricsProvider::OnRecordingEnabled() { |
-  DCHECK(thread_checker_.CalledOnValidThread()); |
- |
+  base::AutoLock lock(lock_); |
metrics_recording_enabled_ = true; |
} |
void SubprocessMetricsProvider::OnRecordingDisabled() { |
-  DCHECK(thread_checker_.CalledOnValidThread()); |
- |
-  metrics_recording_enabled_ = false; |
+  base::AutoLock lock(lock_); |
+  metrics_recording_enabled_ = false; |
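+ |
+  // Reporting is off, so nothing will ever upload these; there is no reason |
+  // to keep data from already-exited processes. |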
allocators_to_release_.clear(); |
} |
void SubprocessMetricsProvider::RecordHistogramSnapshotsFromAllocator( |
base::HistogramSnapshotManager* snapshot_manager, |
+ const std::string& query, |
+ bool absolute, |
int id, |
base::PersistentHistogramAllocator* allocator) { |
DCHECK(allocator); |
@@ -79,7 +129,13 @@ void SubprocessMetricsProvider::RecordHistogramSnapshotsFromAllocator( |
std::unique_ptr<base::HistogramBase> histogram = hist_iter.GetNext(); |
if (!histogram) |
break; |
- snapshot_manager->PrepareDeltaTakingOwnership(std::move(histogram)); |
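+    // Skip histograms whose names don't contain |query|; an empty query |
+    // matches everything. |absolute| snapshots all accumulated samples |
+    // rather than just the delta since the last snapshot. |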
+ if (histogram->histogram_name().find(query) == std::string::npos) |
+ continue; |
+ |
+ if (absolute) |
+ snapshot_manager->PrepareAbsoluteTakingOwnership(std::move(histogram)); |
+ else |
+ snapshot_manager->PrepareDeltaTakingOwnership(std::move(histogram)); |
++histogram_count; |
} |
@@ -89,16 +145,18 @@ void SubprocessMetricsProvider::RecordHistogramSnapshotsFromAllocator( |
void SubprocessMetricsProvider::RecordHistogramSnapshots( |
base::HistogramSnapshotManager* snapshot_manager) { |
- DCHECK(thread_checker_.CalledOnValidThread()); |
+ base::AutoLock lock(lock_); |
- for (AllocatorByIdMap::iterator iter(&allocators_by_id_); !iter.IsAtEnd(); |
- iter.Advance()) { |
- RecordHistogramSnapshotsFromAllocator( |
- snapshot_manager, iter.GetCurrentKey(), iter.GetCurrentValue()); |
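+  // The regular UMA upload path: snapshot deltas of every histogram (the |
+  // empty query matches everything) from all registered allocators. |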
+ for (auto& id_and_allocator : allocators_by_id_) { |
+ RecordHistogramSnapshotsFromAllocator(snapshot_manager, std::string(), |
+ false, id_and_allocator.first, |
+ id_and_allocator.second.get()); |
} |
- for (auto& allocator : allocators_to_release_) |
- RecordHistogramSnapshotsFromAllocator(snapshot_manager, 0, allocator.get()); |
+ for (auto& allocator : allocators_to_release_) { |
+ RecordHistogramSnapshotsFromAllocator(snapshot_manager, std::string(), |
+ false, 0, allocator.get()); |
+ } |
UMA_HISTOGRAM_COUNTS_100( |
"UMA.SubprocessMetricsProvider.SubprocessCount", |
@@ -111,13 +169,52 @@ void SubprocessMetricsProvider::RecordHistogramSnapshots( |
allocators_to_release_.clear(); |
} |
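+ |
+// Writes a summary for the on-demand histogram display of how many |
+// subprocess allocators are currently being reported. |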
+void SubprocessMetricsProvider::WriteTitleString(std::string* output) { |
+ base::AutoLock lock(lock_); |
+ base::StringAppendF( |
+ output, |
+ "Histograms belonging to %d currently active and %d recently exited " |
+ "sub-processes (merged). Data from older processes are not included.", |
+ static_cast<int>(allocators_by_id_.size()), |
+ static_cast<int>(allocators_to_release_.size())); |
+ if (!metrics_recording_enabled_) { |
+ output->append(" UMA reporting is not currently enabled so data from" |
+ " processes that have exited are not kept at all."); |
+ } |
+} |
+ |
+void SubprocessMetricsProvider::WriteGraphs(const std::string& query, |
+ bool html, |
+ std::string* output) { |
+ base::AutoLock lock(lock_); |
+ |
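+  // Display path: take absolute snapshots of histograms matching |query| and |
+  // render them as text or HTML via the flattener. |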
+ HistogramOutputFlattener flattener(html); |
+ base::HistogramSnapshotManager snapshot_manager(&flattener); |
+ snapshot_manager.StartDeltas(); |
+ |
+ for (auto& id_and_allocator : allocators_by_id_) { |
+ RecordHistogramSnapshotsFromAllocator(&snapshot_manager, query, true, |
+ id_and_allocator.first, |
+ id_and_allocator.second.get()); |
+ } |
+ |
+ for (auto& allocator : allocators_to_release_) { |
+ RecordHistogramSnapshotsFromAllocator(&snapshot_manager, query, true, |
+ 0, allocator.get()); |
+ } |
+ |
+ snapshot_manager.FinishDeltas(); |
+ flattener.WriteOutput(output); |
+} |
+ |
void SubprocessMetricsProvider::Observe( |
int type, |
const content::NotificationSource& source, |
const content::NotificationDetails& details) { |
- DCHECK(thread_checker_.CalledOnValidThread()); |
DCHECK_EQ(content::NOTIFICATION_RENDERER_PROCESS_CREATED, type); |
+ base::AutoLock lock(lock_); |
+ |
content::RenderProcessHost* host = |
content::Source<content::RenderProcessHost>(source).ptr(); |
@@ -129,8 +226,6 @@ void SubprocessMetricsProvider::Observe( |
void SubprocessMetricsProvider::RenderProcessReady( |
content::RenderProcessHost* host) { |
- DCHECK(thread_checker_.CalledOnValidThread()); |
- |
// If the render-process-host passed a persistent-memory-allocator to the |
// renderer process, extract it and register it here. |
std::unique_ptr<base::SharedPersistentMemoryAllocator> allocator = |
@@ -147,19 +242,16 @@ void SubprocessMetricsProvider::RenderProcessExited( |
content::RenderProcessHost* host, |
base::TerminationStatus status, |
int exit_code) { |
- DCHECK(thread_checker_.CalledOnValidThread()); |
- |
DeregisterSubprocessAllocator(host->GetID()); |
} |
void SubprocessMetricsProvider::RenderProcessHostDestroyed( |
content::RenderProcessHost* host) { |
- DCHECK(thread_checker_.CalledOnValidThread()); |
- |
// It's possible for a Renderer to terminate without RenderProcessExited |
// (above) being called so it's necessary to de-register also upon the |
// destruction of the host. If both get called, no harm is done. |
- |
DeregisterSubprocessAllocator(host->GetID()); |
+ |
+ base::AutoLock lock(lock_); |
scoped_observer_.Remove(host); |
} |