OLD | NEW |
---|---|
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "base/metrics/statistics_recorder.h" | 5 #include "base/metrics/statistics_recorder.h" |
6 | 6 |
7 #include "base/debug/leak_annotations.h" | 7 #include "base/debug/leak_annotations.h" |
8 #include "base/logging.h" | 8 #include "base/logging.h" |
9 #include "base/memory/scoped_ptr.h" | |
9 #include "base/metrics/histogram.h" | 10 #include "base/metrics/histogram.h" |
10 #include "base/stringprintf.h" | 11 #include "base/stringprintf.h" |
11 #include "base/synchronization/lock.h" | 12 #include "base/synchronization/lock.h" |
12 | 13 |
14 using std::list; | |
15 using std::string; | |
16 | |
13 namespace { | 17 namespace { |
14 // Initialize histogram statistics gathering system. | 18 // Initialize histogram statistics gathering system. |
15 base::LazyInstance<base::StatisticsRecorder>::Leaky | 19 base::LazyInstance<base::StatisticsRecorder>::Leaky g_statistics_recorder_ = |
16 g_statistics_recorder_ = LAZY_INSTANCE_INITIALIZER; | 20 LAZY_INSTANCE_INITIALIZER; |
17 } // namespace | 21 } // namespace |
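The NEW side builds the recorder through a leaky LazyInstance, so the object is constructed on first use and its destructor is never run at process exit. A minimal sketch of the same idea in plain C++ (standard library only, not the actual base::LazyInstance machinery):

  // Leaky lazy singleton: constructed on first call, intentionally never
  // deleted, so no destructor can race with other code during shutdown.
  class Recorder { /* histogram bookkeeping elided */ };

  Recorder& GetRecorder() {
    static Recorder* instance = new Recorder();  // leaked on purpose
    return *instance;
  }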
18 | 22 |
19 namespace base { | 23 namespace base { |
20 | 24 |
21 // Collect the number of histograms created. | 25 // Collect the number of histograms created. |
22 static uint32 number_of_histograms_ = 0; | 26 static uint32 number_of_histograms_ = 0; |
23 // Collect the number of vectors saved because of caching ranges. | 27 // Collect the number of vectors saved because of caching ranges. |
24 static uint32 number_of_vectors_saved_ = 0; | 28 static uint32 number_of_vectors_saved_ = 0; |
25 // Collect the number of ranges_ elements saved because of caching ranges. | 29 // Collect the number of ranges_ elements saved because of caching ranges. |
26 static size_t saved_ranges_size_ = 0; | 30 static size_t saved_ranges_size_ = 0; |
27 | 31 |
28 // This singleton instance should be started during the single threaded portion | |
29 // of main(), and hence it is not thread safe. It initializes globals to | |
30 // provide support for all future calls. | |
31 StatisticsRecorder::StatisticsRecorder() { | |
32 DCHECK(!histograms_); | |
33 if (lock_ == NULL) { | |
34 // This will leak on purpose. It's the only way to make sure we won't race | |
35 // against the static uninitialization of the module while one of our | |
36 // static methods relying on the lock get called at an inappropriate time | |
37 // during the termination phase. Since it's a static data member, we will | |
38 // leak one per process, which would be similar to the instance allocated | |
39 // during static initialization and released only on process termination. | |
40 lock_ = new base::Lock; | |
41 } | |
42 base::AutoLock auto_lock(*lock_); | |
43 histograms_ = new HistogramMap; | |
44 ranges_ = new RangesMap; | |
45 } | |
46 | |
47 StatisticsRecorder::~StatisticsRecorder() { | |
48 DCHECK(histograms_ && lock_); | |
49 | |
50 if (dump_on_exit_) { | |
51 std::string output; | |
52 WriteGraph("", &output); | |
53 DLOG(INFO) << output; | |
54 } | |
55 // Clean up. | |
56 HistogramMap* histograms = NULL; | |
57 { | |
58 base::AutoLock auto_lock(*lock_); | |
59 histograms = histograms_; | |
60 histograms_ = NULL; | |
61 } | |
62 RangesMap* ranges = NULL; | |
63 { | |
64 base::AutoLock auto_lock(*lock_); | |
65 ranges = ranges_; | |
66 ranges_ = NULL; | |
67 } | |
68 // We are going to leak the histograms and the ranges. | |
69 delete histograms; | |
70 delete ranges; | |
71 // We don't delete lock_ on purpose to avoid having to properly protect | |
72 // against it going away after we checked for NULL in the static methods. | |
73 } | |
74 | |
75 // static | 32 // static |
76 void StatisticsRecorder::Initialize() { | 33 void StatisticsRecorder::Initialize() { |
77 // Ensure that an instance of the StatisticsRecorder object is created. | 34 // Ensure that an instance of the StatisticsRecorder object is created. |
78 g_statistics_recorder_.Get(); | 35 g_statistics_recorder_.Get(); |
79 } | 36 } |
80 | 37 |
81 | 38 |
82 // static | 39 // static |
83 bool StatisticsRecorder::IsActive() { | 40 bool StatisticsRecorder::IsActive() { |
84 if (lock_ == NULL) | 41 if (lock_ == NULL) |
85 return false; | 42 return false; |
86 base::AutoLock auto_lock(*lock_); | 43 base::AutoLock auto_lock(*lock_); |
87 return NULL != histograms_; | 44 return NULL != histograms_; |
88 } | 45 } |
89 | 46 |
47 // static | |
90 Histogram* StatisticsRecorder::RegisterOrDeleteDuplicate(Histogram* histogram) { | 48 Histogram* StatisticsRecorder::RegisterOrDeleteDuplicate(Histogram* histogram) { |
91 // As per crbug.com/79322 the histograms are intentionally leaked, so we need | 49 // As per crbug.com/79322 the histograms are intentionally leaked, so we need |
92 // to annotate them. Because ANNOTATE_LEAKING_OBJECT_PTR may be used only once | 50 // to annotate them. Because ANNOTATE_LEAKING_OBJECT_PTR may be used only once |
93 // for an object, the duplicates should not be annotated. | 51 // for an object, the duplicates should not be annotated. |
94 // Callers are responsible for not calling RegisterOrDeleteDuplicate(ptr) | 52 // Callers are responsible for not calling RegisterOrDeleteDuplicate(ptr) |
95 // twice if (lock_ == NULL) || (!histograms_). | 53 // twice if (lock_ == NULL) || (!histograms_). |
96 DCHECK(histogram->HasValidRangeChecksum()); | |
97 if (lock_ == NULL) { | 54 if (lock_ == NULL) { |
98 ANNOTATE_LEAKING_OBJECT_PTR(histogram); // see crbug.com/79322 | 55 ANNOTATE_LEAKING_OBJECT_PTR(histogram); // see crbug.com/79322 |
99 return histogram; | 56 return histogram; |
100 } | 57 } |
101 base::AutoLock auto_lock(*lock_); | 58 |
102 if (!histograms_) { | 59 Histogram* histogram_to_delete = NULL; |
103 ANNOTATE_LEAKING_OBJECT_PTR(histogram); // see crbug.com/79322 | 60 Histogram* histogram_to_return = NULL; |
104 return histogram; | 61 { |
62 base::AutoLock auto_lock(*lock_); | |
63 if (histograms_ == NULL) { | |
64 ANNOTATE_LEAKING_OBJECT_PTR(histogram); // see crbug.com/79322 | |
65 histogram_to_return = histogram; | |
66 } else { | |
67 const string& name = histogram->histogram_name(); | |
68 HistogramMap::iterator it = histograms_->find(name); | |
69 if (histograms_->end() == it) { | |
70 (*histograms_)[name] = histogram; | |
71 ANNOTATE_LEAKING_OBJECT_PTR(histogram); // see crbug.com/79322 | |
72 ++number_of_histograms_; | |
73 histogram_to_return = histogram; | |
74 } else if (histogram == it->second) { | |
75 // The histogram was registered before. | |
76 histogram_to_return = histogram; | |
77 } else { | |
78 // We already have one histogram with this name. | |
79 histogram_to_return = it->second; | |
80 histogram_to_delete = histogram; | |
81 } | |
82 } | |
105 } | 83 } |
106 const std::string name = histogram->histogram_name(); | 84 delete histogram_to_delete; |
107 HistogramMap::iterator it = histograms_->find(name); | 85 return histogram_to_return; |
108 // Avoid overwriting a previous registration. | |
109 if (histograms_->end() == it) { | |
110 (*histograms_)[name] = histogram; | |
111 ANNOTATE_LEAKING_OBJECT_PTR(histogram); // see crbug.com/79322 | |
112 RegisterOrDeleteDuplicateRanges(histogram); | |
113 ++number_of_histograms_; | |
114 } else { | |
115 delete histogram; // We already have one by this name. | |
116 histogram = it->second; | |
117 } | |
118 return histogram; | |
119 } | 86 } |
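The register-or-delete contract above means callers must always adopt the returned pointer: if a histogram with the same name was registered first, the candidate passed in is deleted and the existing object is returned instead. A caller-side sketch (hypothetical factory and constructor arguments, not the real Histogram::FactoryGet code):

  // Hypothetical factory: hand a freshly built histogram to the recorder
  // and keep whatever comes back; the candidate may already be deleted.
  Histogram* GetNamedHistogram(const std::string& name) {
    Histogram* candidate = new Histogram(name /* plus ranges, flags, ... */);
    return StatisticsRecorder::RegisterOrDeleteDuplicate(candidate);
  }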
120 | 87 |
121 // static | 88 // static |
122 void StatisticsRecorder::RegisterOrDeleteDuplicateRanges(Histogram* histogram) { | 89 const BucketRanges* StatisticsRecorder::RegisterOrDeleteDuplicateRanges( |
123 DCHECK(histogram); | 90 const BucketRanges* ranges) { |
124 BucketRanges* histogram_ranges = histogram->bucket_ranges(); | 91 DCHECK(ranges->HasValidChecksum()); |
125 DCHECK(histogram_ranges); | 92 if (lock_ == NULL) { |
126 uint32 checksum = histogram->range_checksum(); | 93 ANNOTATE_LEAKING_OBJECT_PTR(ranges); |
127 histogram_ranges->set_checksum(checksum); | 94 return ranges; |
128 | |
129 RangesMap::iterator ranges_it = ranges_->find(checksum); | |
130 if (ranges_->end() == ranges_it) { | |
131 // Register the new BucketRanges. | |
132 std::list<BucketRanges*>* checksum_matching_list( | |
133 new std::list<BucketRanges*>()); | |
134 checksum_matching_list->push_front(histogram_ranges); | |
135 (*ranges_)[checksum] = checksum_matching_list; | |
136 return; | |
137 } | 95 } |
138 | 96 |
139 // Use the registered BucketRanges if the registered BucketRanges has same | 97 scoped_ptr<const BucketRanges> ranges_deleter(ranges); |
jar (doing other things)  2012/08/01 16:41:25
IMO, you should wait till the last moment to push
kaiwang  2012/08/01 19:48:19
Done.
140 // ranges_ as |histogram|'s BucketRanges. | 98 base::AutoLock auto_lock(*lock_); |
141 std::list<BucketRanges*>* checksum_matching_list = ranges_it->second; | 99 if (ranges_ == NULL) { |
142 std::list<BucketRanges*>::iterator checksum_matching_list_it; | 100 ANNOTATE_LEAKING_OBJECT_PTR(ranges); |
101 return ranges_deleter.release(); | |
102 } | |
103 | |
104 list<const BucketRanges*>* checksum_matching_list; | |
105 RangesMap::iterator ranges_it = ranges_->find(ranges->checksum()); | |
106 if (ranges_->end() == ranges_it) { | |
107 // Add a new matching list to map. | |
108 checksum_matching_list = new list<const BucketRanges*>(); | |
109 ANNOTATE_LEAKING_OBJECT_PTR(checksum_matching_list); | |
110 (*ranges_)[ranges->checksum()] = checksum_matching_list; | |
111 } else { | |
112 checksum_matching_list = ranges_it->second; | |
113 } | |
114 | |
115 list<const BucketRanges*>::iterator checksum_matching_list_it; | |
143 for (checksum_matching_list_it = checksum_matching_list->begin(); | 116 for (checksum_matching_list_it = checksum_matching_list->begin(); |
144 checksum_matching_list_it != checksum_matching_list->end(); | 117 checksum_matching_list_it != checksum_matching_list->end(); |
145 ++checksum_matching_list_it) { | 118 ++checksum_matching_list_it) { |
146 BucketRanges* existing_histogram_ranges = *checksum_matching_list_it; | 119 const BucketRanges* existing_ranges = *checksum_matching_list_it; |
147 DCHECK(existing_histogram_ranges); | 120 if (existing_ranges->Equals(ranges)) { |
148 if (existing_histogram_ranges->Equals(histogram_ranges)) { | 121 if (existing_ranges == ranges) { |
149 histogram->set_bucket_ranges(existing_histogram_ranges); | 122 return ranges_deleter.release(); |
150 ++number_of_vectors_saved_; | 123 } else { |
151 saved_ranges_size_ += histogram_ranges->size(); | 124 ++number_of_vectors_saved_; |
152 delete histogram_ranges; | 125 saved_ranges_size_ += ranges->size(); |
153 return; | 126 return existing_ranges; |
127 } | |
154 } | 128 } |
155 } | 129 } |
156 | |
157 // We haven't found a BucketRanges which has the same ranges. Register the | 130 // We haven't found a BucketRanges which has the same ranges. Register the |
158 // new BucketRanges. | 131 // new BucketRanges. |
159 DCHECK(checksum_matching_list_it == checksum_matching_list->end()); | 132 checksum_matching_list->push_front(ranges); |
160 checksum_matching_list->push_front(histogram_ranges); | 133 ANNOTATE_LEAKING_OBJECT_PTR(ranges); |
134 return ranges_deleter.release(); | |
161 } | 135 } |
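The NEW RegisterOrDeleteDuplicateRanges dedups BucketRanges in two steps: bucket candidates by checksum in a map of lists, then do an element-wise Equals() scan inside the matching list, returning the already registered copy when one is found. A standalone sketch of that hash-then-equality reuse pattern, using standard containers and a hypothetical Ranges type (assumes C++11, single-threaded use):

  #include <list>
  #include <map>
  #include <vector>

  struct Ranges {
    std::vector<int> bounds;
    unsigned checksum = 0;
    bool Equals(const Ranges* other) const { return bounds == other->bounds; }
  };

  // Returns the canonical Ranges to use; deletes |candidate| if an equal
  // object was registered earlier under the same checksum.
  const Ranges* RegisterOrReuse(std::map<unsigned, std::list<const Ranges*> >* table,
                                const Ranges* candidate) {
    std::list<const Ranges*>& bucket = (*table)[candidate->checksum];
    for (std::list<const Ranges*>::iterator it = bucket.begin();
         it != bucket.end(); ++it) {
      if ((*it)->Equals(candidate)) {
        delete candidate;            // reuse the registered copy
        return *it;
      }
    }
    bucket.push_front(candidate);    // first of its kind: register it
    return candidate;
  }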
162 | 136 |
163 // static | 137 // static |
164 void StatisticsRecorder::CollectHistogramStats(const std::string& suffix) { | 138 void StatisticsRecorder::CollectHistogramStats(const std::string& suffix) { |
165 static int uma_upload_attempt = 0; | 139 static int uma_upload_attempt = 0; |
166 ++uma_upload_attempt; | 140 ++uma_upload_attempt; |
167 if (uma_upload_attempt == 1) { | 141 if (uma_upload_attempt == 1) { |
168 UMA_HISTOGRAM_COUNTS_10000( | 142 UMA_HISTOGRAM_COUNTS_10000( |
169 "Histogram.SharedRange.Count.FirstUpload." + suffix, | 143 "Histogram.SharedRange.Count.FirstUpload." + suffix, |
170 number_of_histograms_); | 144 number_of_histograms_); |
(...skipping 68 matching lines...) | |
239 (*it)->WriteAscii(output); | 213 (*it)->WriteAscii(output); |
240 output->append("\n"); | 214 output->append("\n"); |
241 } | 215 } |
242 } | 216 } |
243 | 217 |
244 // static | 218 // static |
245 void StatisticsRecorder::GetHistograms(Histograms* output) { | 219 void StatisticsRecorder::GetHistograms(Histograms* output) { |
246 if (lock_ == NULL) | 220 if (lock_ == NULL) |
247 return; | 221 return; |
248 base::AutoLock auto_lock(*lock_); | 222 base::AutoLock auto_lock(*lock_); |
249 if (!histograms_) | 223 if (histograms_ == NULL) |
250 return; | 224 return; |
225 | |
251 for (HistogramMap::iterator it = histograms_->begin(); | 226 for (HistogramMap::iterator it = histograms_->begin(); |
252 histograms_->end() != it; | 227 histograms_->end() != it; |
253 ++it) { | 228 ++it) { |
254 DCHECK_EQ(it->first, it->second->histogram_name()); | 229 DCHECK_EQ(it->first, it->second->histogram_name()); |
255 output->push_back(it->second); | 230 output->push_back(it->second); |
256 } | 231 } |
257 } | 232 } |
258 | 233 |
259 // static | 234 // static |
235 void StatisticsRecorder::GetBucketRanges( | |
236 std::vector<const BucketRanges*>* output) { | |
237 if (lock_ == NULL) | |
238 return; | |
239 base::AutoLock auto_lock(*lock_); | |
240 if (ranges_ == NULL) | |
241 return; | |
242 | |
243 for (RangesMap::iterator it = ranges_->begin(); | |
244 ranges_->end() != it; | |
245 ++it) { | |
246 list<const BucketRanges*>* ranges_list = it->second; | |
247 list<const BucketRanges*>::iterator ranges_list_it; | |
248 for (ranges_list_it = ranges_list->begin(); | |
249 ranges_list_it != ranges_list->end(); | |
250 ++ranges_list_it) { | |
251 output->push_back(*ranges_list_it); | |
252 } | |
253 } | |
254 } | |
255 | |
256 // static | |
260 Histogram* StatisticsRecorder::FindHistogram(const std::string& name) { | 257 Histogram* StatisticsRecorder::FindHistogram(const std::string& name) { |
261 if (lock_ == NULL) | 258 if (lock_ == NULL) |
262 return NULL; | 259 return NULL; |
263 base::AutoLock auto_lock(*lock_); | 260 base::AutoLock auto_lock(*lock_); |
264 if (!histograms_) | 261 if (histograms_ == NULL) |
265 return NULL; | 262 return NULL; |
263 | |
266 HistogramMap::iterator it = histograms_->find(name); | 264 HistogramMap::iterator it = histograms_->find(name); |
267 if (histograms_->end() == it) | 265 if (histograms_->end() == it) |
268 return NULL; | 266 return NULL; |
269 return it->second; | 267 return it->second; |
270 } | 268 } |
271 | 269 |
272 // private static | 270 // private static |
273 void StatisticsRecorder::GetSnapshot(const std::string& query, | 271 void StatisticsRecorder::GetSnapshot(const std::string& query, |
274 Histograms* snapshot) { | 272 Histograms* snapshot) { |
275 if (lock_ == NULL) | 273 if (lock_ == NULL) |
276 return; | 274 return; |
277 base::AutoLock auto_lock(*lock_); | 275 base::AutoLock auto_lock(*lock_); |
278 if (!histograms_) | 276 if (histograms_ == NULL) |
279 return; | 277 return; |
278 | |
280 for (HistogramMap::iterator it = histograms_->begin(); | 279 for (HistogramMap::iterator it = histograms_->begin(); |
281 histograms_->end() != it; | 280 histograms_->end() != it; |
282 ++it) { | 281 ++it) { |
283 if (it->first.find(query) != std::string::npos) | 282 if (it->first.find(query) != std::string::npos) |
284 snapshot->push_back(it->second); | 283 snapshot->push_back(it->second); |
285 } | 284 } |
286 } | 285 } |
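The lookup APIs above (GetHistograms, FindHistogram, GetSnapshot) all follow the same shape: bail out if the lock or the map was never created, otherwise walk the map under the lock. A hypothetical caller sketch (illustrative only, not from the actual tree) that dumps every registered histogram, similar to what WriteGraph("") does internally:

  // Assumes StatisticsRecorder::Histograms is a std::vector<Histogram*>, as
  // the push_back() calls above suggest.
  void DumpAllHistograms(std::string* output) {
    StatisticsRecorder::Histograms histograms;
    StatisticsRecorder::GetHistograms(&histograms);
    for (size_t i = 0; i < histograms.size(); ++i) {
      histograms[i]->WriteAscii(output);
      output->append("\n");
    }
  }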
287 | 286 |
287 // This singleton instance should be started during the single threaded portion | |
288 // of main(), and hence it is not thread safe. It initializes globals to | |
289 // provide support for all future calls. | |
290 StatisticsRecorder::StatisticsRecorder() { | |
291 DCHECK(!histograms_); | |
292 if (lock_ == NULL) { | |
293 // This will leak on purpose. It's the only way to make sure we won't race | |
294 // against the static uninitialization of the module while one of our | |
295 // static methods relying on the lock get called at an inappropriate time | |
296 // during the termination phase. Since it's a static data member, we will | |
297 // leak one per process, which would be similar to the instance allocated | |
298 // during static initialization and released only on process termination. | |
299 lock_ = new base::Lock; | |
300 } | |
301 base::AutoLock auto_lock(*lock_); | |
302 histograms_ = new HistogramMap; | |
303 ranges_ = new RangesMap; | |
304 } | |
305 | |
306 StatisticsRecorder::~StatisticsRecorder() { | |
307 DCHECK(histograms_ && ranges_ && lock_); | |
308 | |
309 scoped_ptr<HistogramMap> histograms_deleter(histograms_); | |
310 scoped_ptr<RangesMap> ranges_deleter(ranges_); | |
jar (doing other things)  2012/08/01 16:41:25
For both of these, you should wait until you are i
kaiwang  2012/08/01 19:48:19
Done.
311 // We don't delete lock_ on purpose to avoid having to properly protect | |
312 // against it going away after we checked for NULL in the static methods. | |
313 | |
314 if (dump_on_exit_) { | |
315 string output; | |
316 WriteGraph("", &output); | |
317 DLOG(INFO) << output; | |
318 } | |
319 | |
320 // Clean up. | |
321 { | |
322 base::AutoLock auto_lock(*lock_); | |
323 histograms_ = NULL; | |
324 ranges_ = NULL; | |
325 } | |
326 // We are going to leak the histograms and the ranges. | |
327 } | |
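Both the OLD and NEW destructors rely on the same shutdown pattern: lock_ is allocated once and deliberately never freed, and every static accessor checks lock_ for NULL first, so a call arriving during static destruction sees either "not initialized" or a still-valid lock, never a destroyed one. A standalone sketch of that pattern with standard-library stand-ins (plain C++, not the Chromium base types):

  #include <cstddef>
  #include <mutex>

  class Registry {
   public:
    static bool IsActive() {
      if (lock_ == NULL)
        return false;                        // never initialized
      std::lock_guard<std::mutex> guard(*lock_);
      return entries_ != NULL;               // may already be torn down
    }
    // Meant to run on the main thread before other threads start, like the
    // recorder's constructor above.
    static void Init() {
      if (lock_ == NULL)
        lock_ = new std::mutex;              // leaked intentionally
      std::lock_guard<std::mutex> guard(*lock_);
      if (entries_ == NULL)
        entries_ = new int(0);
    }

   private:
    static std::mutex* lock_;                // never deleted
    static int* entries_;
  };

  std::mutex* Registry::lock_ = NULL;
  int* Registry::entries_ = NULL;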
328 | |
329 | |
288 // static | 330 // static |
289 StatisticsRecorder::HistogramMap* StatisticsRecorder::histograms_ = NULL; | 331 StatisticsRecorder::HistogramMap* StatisticsRecorder::histograms_ = NULL; |
290 // static | 332 // static |
291 StatisticsRecorder::RangesMap* StatisticsRecorder::ranges_ = NULL; | 333 StatisticsRecorder::RangesMap* StatisticsRecorder::ranges_ = NULL; |
292 // static | 334 // static |
293 base::Lock* StatisticsRecorder::lock_ = NULL; | 335 base::Lock* StatisticsRecorder::lock_ = NULL; |
294 // static | 336 // static |
295 bool StatisticsRecorder::dump_on_exit_ = false; | 337 bool StatisticsRecorder::dump_on_exit_ = false; |
296 | 338 |
297 } // namespace base | 339 } // namespace base |