OLD | NEW |
---|---|
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "base/metrics/statistics_recorder.h" | 5 #include "base/metrics/statistics_recorder.h" |
6 | 6 |
7 #include "base/debug/leak_annotations.h" | 7 #include "base/debug/leak_annotations.h" |
8 #include "base/logging.h" | 8 #include "base/logging.h" |
9 #include "base/metrics/histogram.h" | 9 #include "base/metrics/histogram.h" |
10 #include "base/stringprintf.h" | 10 #include "base/stringprintf.h" |
11 #include "base/synchronization/lock.h" | 11 #include "base/synchronization/lock.h" |
12 | 12 |
13 using std::string; | |
14 using std::list; | |
jar (doing other things)
2012/08/01 00:26:10
nit: perhaps alphabetize
kaiwang
2012/08/01 04:13:21
Done.
| |
15 | |
13 namespace { | 16 namespace { |
14 // Initialize histogram statistics gathering system. | 17 // Initialize histogram statistics gathering system. |
15 base::LazyInstance<base::StatisticsRecorder>::Leaky | 18 base::LazyInstance<base::StatisticsRecorder>::Leaky g_statistics_recorder_ = |
16 g_statistics_recorder_ = LAZY_INSTANCE_INITIALIZER; | 19 LAZY_INSTANCE_INITIALIZER; |
17 } // namespace | 20 } // namespace |
18 | 21 |
19 namespace base { | 22 namespace base { |
20 | 23 |
21 // Collect the number of histograms created. | 24 // Collect the number of histograms created. |
22 static uint32 number_of_histograms_ = 0; | 25 static uint32 number_of_histograms_ = 0; |
23 // Collect the number of vectors saved because of caching ranges. | 26 // Collect the number of vectors saved because of caching ranges. |
24 static uint32 number_of_vectors_saved_ = 0; | 27 static uint32 number_of_vectors_saved_ = 0; |
25 // Collect the number of ranges_ elements saved because of caching ranges. | 28 // Collect the number of ranges_ elements saved because of caching ranges. |
26 static size_t saved_ranges_size_ = 0; | 29 static size_t saved_ranges_size_ = 0; |
27 | 30 |
28 // This singleton instance should be started during the single threaded portion | |
29 // of main(), and hence it is not thread safe. It initializes globals to | |
30 // provide support for all future calls. | |
31 StatisticsRecorder::StatisticsRecorder() { | |
32 DCHECK(!histograms_); | |
33 if (lock_ == NULL) { | |
34 // This will leak on purpose. It's the only way to make sure we won't race | |
35 // against the static uninitialization of the module while one of our | |
36 // static methods relying on the lock get called at an inappropriate time | |
37 // during the termination phase. Since it's a static data member, we will | |
38 // leak one per process, which would be similar to the instance allocated | |
39 // during static initialization and released only on process termination. | |
40 lock_ = new base::Lock; | |
41 } | |
42 base::AutoLock auto_lock(*lock_); | |
43 histograms_ = new HistogramMap; | |
44 ranges_ = new RangesMap; | |
45 } | |
46 | |
47 StatisticsRecorder::~StatisticsRecorder() { | |
48 DCHECK(histograms_ && lock_); | |
49 | |
50 if (dump_on_exit_) { | |
51 std::string output; | |
52 WriteGraph("", &output); | |
53 DLOG(INFO) << output; | |
54 } | |
55 // Clean up. | |
56 HistogramMap* histograms = NULL; | |
57 { | |
58 base::AutoLock auto_lock(*lock_); | |
59 histograms = histograms_; | |
60 histograms_ = NULL; | |
61 } | |
62 RangesMap* ranges = NULL; | |
63 { | |
64 base::AutoLock auto_lock(*lock_); | |
65 ranges = ranges_; | |
66 ranges_ = NULL; | |
67 } | |
68 // We are going to leak the histograms and the ranges. | |
69 delete histograms; | |
70 delete ranges; | |
71 // We don't delete lock_ on purpose to avoid having to properly protect | |
72 // against it going away after we checked for NULL in the static methods. | |
73 } | |
74 | |
75 // static | 31 // static |
76 void StatisticsRecorder::Initialize() { | 32 void StatisticsRecorder::Initialize() { |
77 // Ensure that an instance of the StatisticsRecorder object is created. | 33 // Ensure that an instance of the StatisticsRecorder object is created. |
78 g_statistics_recorder_.Get(); | 34 g_statistics_recorder_.Get(); |
79 } | 35 } |
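
Initialize() exists so the recorder singleton is created while main() is still single-threaded. A minimal sketch of a plausible call site (the AtExitManager and the UMA macro are illustrative assumptions, not part of this CL):

    #include "base/at_exit.h"
    #include "base/metrics/histogram.h"
    #include "base/metrics/statistics_recorder.h"

    int main(int argc, char** argv) {
      // The Leaky LazyInstance above skips AtExit registration, but most of
      // base still expects an AtExitManager to exist.
      base::AtExitManager exit_manager;
      base::StatisticsRecorder::Initialize();

      // Histograms created from here on get registered with the recorder,
      // so FindHistogram() and the UMA uploader can see them.
      UMA_HISTOGRAM_COUNTS("Example.StartupEvents", 1);
      return 0;
    }
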
80 | 36 |
81 | 37 |
82 // static | 38 // static |
83 bool StatisticsRecorder::IsActive() { | 39 bool StatisticsRecorder::IsActive() { |
84 if (lock_ == NULL) | 40 if (lock_ == NULL) |
85 return false; | 41 return false; |
86 base::AutoLock auto_lock(*lock_); | 42 base::AutoLock auto_lock(*lock_); |
87 return NULL != histograms_; | 43 return NULL != histograms_; |
88 } | 44 } |
89 | 45 |
46 // static | |
90 Histogram* StatisticsRecorder::RegisterOrDeleteDuplicate(Histogram* histogram) { | 47 Histogram* StatisticsRecorder::RegisterOrDeleteDuplicate(Histogram* histogram) { |
91 // As per crbug.com/79322 the histograms are intentionally leaked, so we need | 48 // As per crbug.com/79322 the histograms are intentionally leaked, so we need |
92 // to annotate them. Because ANNOTATE_LEAKING_OBJECT_PTR may be used only once | 49 // to annotate them. Because ANNOTATE_LEAKING_OBJECT_PTR may be used only once |
93 // for an object, the duplicates should not be annotated. | 50 // for an object, the duplicates should not be annotated. |
94 // Callers are responsible for not calling RegisterOrDeleteDuplicate(ptr) | 51 // Callers are responsible for not calling RegisterOrDeleteDuplicate(ptr) |
95 // twice if (lock_ == NULL) || (!histograms_). | 52 // twice if (lock_ == NULL) || (!histograms_). |
96 DCHECK(histogram->HasValidRangeChecksum()); | 53 if (lock_ == NULL || histograms_ == NULL) { |
jar (doing other things)
2012/08/01 00:26:10
I'd prefer to have all accesses to histograms_ made under the lock.
kaiwang
2012/08/01 04:13:21
Done.
| |
97 if (lock_ == NULL) { | |
98 ANNOTATE_LEAKING_OBJECT_PTR(histogram); // see crbug.com/79322 | 54 ANNOTATE_LEAKING_OBJECT_PTR(histogram); // see crbug.com/79322 |
99 return histogram; | 55 return histogram; |
100 } | 56 } |
101 base::AutoLock auto_lock(*lock_); | 57 base::AutoLock auto_lock(*lock_); |
102 if (!histograms_) { | 58 const string& name = histogram->histogram_name(); |
103 ANNOTATE_LEAKING_OBJECT_PTR(histogram); // see crbug.com/79322 | |
104 return histogram; | |
105 } | |
106 const std::string name = histogram->histogram_name(); | |
107 HistogramMap::iterator it = histograms_->find(name); | 59 HistogramMap::iterator it = histograms_->find(name); |
108 // Avoid overwriting a previous registration. | 60 // Avoid overwriting a previous registration. |
109 if (histograms_->end() == it) { | 61 if (histograms_->end() == it) { |
110 (*histograms_)[name] = histogram; | 62 (*histograms_)[name] = histogram; |
111 ANNOTATE_LEAKING_OBJECT_PTR(histogram); // see crbug.com/79322 | 63 ANNOTATE_LEAKING_OBJECT_PTR(histogram); // see crbug.com/79322 |
112 RegisterOrDeleteDuplicateRanges(histogram); | |
113 ++number_of_histograms_; | 64 ++number_of_histograms_; |
114 } else { | 65 } else { |
115 delete histogram; // We already have one by this name. | 66 if (histogram != it->second) { |
116 histogram = it->second; | 67 // We already have one by this name. |
68 delete histogram; | |
jar (doing other things)
2012/08/01 00:26:10
Better would be to do the delete outside the lock.
kaiwang
2012/08/01 04:13:21
Done.
| |
69 histogram = it->second; | |
70 } | |
117 } | 71 } |
118 return histogram; | 72 return histogram; |
119 } | 73 } |
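
jar's request to move the delete outside the lock is marked Done in a later patch set that is not shown here. A sketch of the likely shape, reusing the names from this CL (the landed code may differ):

    // Sketch: defer deleting the duplicate until the lock has been released,
    // so no allocator work happens while other threads may be waiting on lock_.
    Histogram* StatisticsRecorder::RegisterOrDeleteDuplicate(Histogram* histogram) {
      Histogram* histogram_to_delete = NULL;
      Histogram* histogram_to_return = NULL;
      if (lock_ == NULL || histograms_ == NULL) {
        ANNOTATE_LEAKING_OBJECT_PTR(histogram);  // see crbug.com/79322
        return histogram;
      }
      {
        base::AutoLock auto_lock(*lock_);
        const string& name = histogram->histogram_name();
        HistogramMap::iterator it = histograms_->find(name);
        if (histograms_->end() == it) {
          (*histograms_)[name] = histogram;
          ANNOTATE_LEAKING_OBJECT_PTR(histogram);  // see crbug.com/79322
          ++number_of_histograms_;
          histogram_to_return = histogram;
        } else if (histogram == it->second) {
          histogram_to_return = histogram;  // Already registered; nothing to do.
        } else {
          histogram_to_return = it->second;  // Keep the first registration.
          histogram_to_delete = histogram;   // Delete the duplicate, unlocked.
        }
      }
      delete histogram_to_delete;  // Safe: no other thread can reach it.
      return histogram_to_return;
    }
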
120 | 74 |
121 // static | 75 // static |
122 void StatisticsRecorder::RegisterOrDeleteDuplicateRanges(Histogram* histogram) { | 76 const BucketRanges* StatisticsRecorder::RegisterOrDeleteDuplicateRanges( |
123 DCHECK(histogram); | 77 const BucketRanges* ranges) { |
124 BucketRanges* histogram_ranges = histogram->bucket_ranges(); | 78 DCHECK(ranges->HasValidChecksum()); |
125 DCHECK(histogram_ranges); | 79 if (lock_ == NULL || ranges_ == NULL) { |
jar (doing other things)
2012/08/01 00:26:10
Try to avoid access to ranges_ without the lock being held.
kaiwang
2012/08/01 04:13:21
Done.
| |
126 uint32 checksum = histogram->range_checksum(); | 80 ANNOTATE_LEAKING_OBJECT_PTR(ranges); |
127 histogram_ranges->set_checksum(checksum); | 81 return ranges; |
82 } | |
83 base::AutoLock auto_lock(*lock_); | |
128 | 84 |
129 RangesMap::iterator ranges_it = ranges_->find(checksum); | 85 list<const BucketRanges*>* checksum_matching_list; |
86 RangesMap::iterator ranges_it = ranges_->find(ranges->checksum()); | |
130 if (ranges_->end() == ranges_it) { | 87 if (ranges_->end() == ranges_it) { |
131 // Register the new BucketRanges. | 88 // Add a new matching list to map. |
132 std::list<BucketRanges*>* checksum_matching_list( | 89 checksum_matching_list = new list<const BucketRanges*>(); |
133 new std::list<BucketRanges*>()); | 90 ANNOTATE_LEAKING_OBJECT_PTR(checksum_matching_list); |
134 checksum_matching_list->push_front(histogram_ranges); | 91 (*ranges_)[ranges->checksum()] = checksum_matching_list; |
135 (*ranges_)[checksum] = checksum_matching_list; | 92 } else { |
136 return; | 93 checksum_matching_list = ranges_it->second; |
137 } | 94 } |
138 | 95 |
139 // Use the registered BucketRanges if the registered BucketRanges has same | 96 list<const BucketRanges*>::iterator checksum_matching_list_it; |
140 // ranges_ as |histogram|'s BucketRanges. | |
141 std::list<BucketRanges*>* checksum_matching_list = ranges_it->second; | |
142 std::list<BucketRanges*>::iterator checksum_matching_list_it; | |
143 for (checksum_matching_list_it = checksum_matching_list->begin(); | 97 for (checksum_matching_list_it = checksum_matching_list->begin(); |
144 checksum_matching_list_it != checksum_matching_list->end(); | 98 checksum_matching_list_it != checksum_matching_list->end(); |
145 ++checksum_matching_list_it) { | 99 ++checksum_matching_list_it) { |
146 BucketRanges* existing_histogram_ranges = *checksum_matching_list_it; | 100 const BucketRanges* existing_ranges = *checksum_matching_list_it; |
147 DCHECK(existing_histogram_ranges); | 101 if (existing_ranges->Equals(ranges)) { |
148 if (existing_histogram_ranges->Equals(histogram_ranges)) { | 102 if (existing_ranges != ranges) { |
149 histogram->set_bucket_ranges(existing_histogram_ranges); | 103 ++number_of_vectors_saved_; |
150 ++number_of_vectors_saved_; | 104 saved_ranges_size_ += ranges->size(); |
151 saved_ranges_size_ += histogram_ranges->size(); | 105 delete ranges; |
jar (doing other things)
2012/08/01 00:26:10
Use scoped_ptr to implicitly delete after we release the lock.
kaiwang
2012/08/01 04:13:21
Done.
| |
152 delete histogram_ranges; | 106 } |
153 return; | 107 return existing_ranges; |
154 } | 108 } |
155 } | 109 } |
156 | |
157 // We haven't found a BucketRanges which has the same ranges. Register the | 110 // We haven't found a BucketRanges which has the same ranges. Register the |
158 // new BucketRanges. | 111 // new BucketRanges. |
159 DCHECK(checksum_matching_list_it == checksum_matching_list->end()); | 112 checksum_matching_list->push_front(ranges); |
160 checksum_matching_list->push_front(histogram_ranges); | 113 ANNOTATE_LEAKING_OBJECT_PTR(ranges); |
114 return ranges; | |
161 } | 115 } |
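
The scoped_ptr suggestion works because locals are destroyed in reverse declaration order: a scoped_ptr declared before the AutoLock is destroyed after it, so the duplicate is freed only once the lock has been released. A sketch of the likely follow-up (assumes base/memory/scoped_ptr.h; not the code in this patch set):

    // Sketch: ranges_deleter outlives auto_lock, so a duplicate reset() into
    // it is deleted only after the lock is dropped.
    const BucketRanges* StatisticsRecorder::RegisterOrDeleteDuplicateRanges(
        const BucketRanges* ranges) {
      DCHECK(ranges->HasValidChecksum());
      scoped_ptr<const BucketRanges> ranges_deleter;

      if (lock_ == NULL || ranges_ == NULL) {
        ANNOTATE_LEAKING_OBJECT_PTR(ranges);
        return ranges;
      }
      base::AutoLock auto_lock(*lock_);

      list<const BucketRanges*>* checksum_matching_list;
      RangesMap::iterator ranges_it = ranges_->find(ranges->checksum());
      if (ranges_->end() == ranges_it) {
        // Add a new matching list to the map.
        checksum_matching_list = new list<const BucketRanges*>();
        ANNOTATE_LEAKING_OBJECT_PTR(checksum_matching_list);
        (*ranges_)[ranges->checksum()] = checksum_matching_list;
      } else {
        checksum_matching_list = ranges_it->second;
      }

      for (list<const BucketRanges*>::iterator it =
               checksum_matching_list->begin();
           it != checksum_matching_list->end(); ++it) {
        const BucketRanges* existing_ranges = *it;
        if (existing_ranges->Equals(ranges)) {
          if (existing_ranges != ranges) {
            ++number_of_vectors_saved_;
            saved_ranges_size_ += ranges->size();
            ranges_deleter.reset(ranges);  // Freed after auto_lock unlocks.
          }
          return existing_ranges;
        }
      }
      checksum_matching_list->push_front(ranges);
      ANNOTATE_LEAKING_OBJECT_PTR(ranges);
      return ranges;
    }
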
162 | 116 |
163 // static | 117 // static |
164 void StatisticsRecorder::CollectHistogramStats(const std::string& suffix) { | 118 void StatisticsRecorder::CollectHistogramStats(const std::string& suffix) { |
165 static int uma_upload_attempt = 0; | 119 static int uma_upload_attempt = 0; |
166 ++uma_upload_attempt; | 120 ++uma_upload_attempt; |
167 if (uma_upload_attempt == 1) { | 121 if (uma_upload_attempt == 1) { |
168 UMA_HISTOGRAM_COUNTS_10000( | 122 UMA_HISTOGRAM_COUNTS_10000( |
169 "Histogram.SharedRange.Count.FirstUpload." + suffix, | 123 "Histogram.SharedRange.Count.FirstUpload." + suffix, |
170 number_of_histograms_); | 124 number_of_histograms_); |
(...skipping 65 matching lines...) | |
236 for (Histograms::iterator it = snapshot.begin(); | 190 for (Histograms::iterator it = snapshot.begin(); |
237 it != snapshot.end(); | 191 it != snapshot.end(); |
238 ++it) { | 192 ++it) { |
239 (*it)->WriteAscii(output); | 193 (*it)->WriteAscii(output); |
240 output->append("\n"); | 194 output->append("\n"); |
241 } | 195 } |
242 } | 196 } |
243 | 197 |
244 // static | 198 // static |
245 void StatisticsRecorder::GetHistograms(Histograms* output) { | 199 void StatisticsRecorder::GetHistograms(Histograms* output) { |
246 if (lock_ == NULL) | 200 if (lock_ == NULL || histograms_ == NULL) |
247 return; | 201 return; |
248 base::AutoLock auto_lock(*lock_); | 202 base::AutoLock auto_lock(*lock_); |
249 if (!histograms_) | |
jar (doing other things)
2012/08/01 00:26:10
better to access under lock.
| |
250 return; | |
251 for (HistogramMap::iterator it = histograms_->begin(); | 203 for (HistogramMap::iterator it = histograms_->begin(); |
252 histograms_->end() != it; | 204 histograms_->end() != it; |
253 ++it) { | 205 ++it) { |
254 DCHECK_EQ(it->first, it->second->histogram_name()); | 206 DCHECK_EQ(it->first, it->second->histogram_name()); |
255 output->push_back(it->second); | 207 output->push_back(it->second); |
256 } | 208 } |
257 } | 209 } |
258 | 210 |
259 // static | 211 // static |
212 void StatisticsRecorder::GetBucketRanges( | |
213 std::vector<const BucketRanges*>* output) { | |
214 if (lock_ == NULL || ranges_ == NULL) | |
jar (doing other things)
2012/08/01 00:26:10
access ranges_ under lock
several times below
kaiwang
2012/08/01 04:13:21
Done.
| |
215 return; | |
216 base::AutoLock auto_lock(*lock_); | |
217 for (RangesMap::iterator it = ranges_->begin(); | |
218 ranges_->end() != it; | |
219 ++it) { | |
220 list<const BucketRanges*>* ranges_list = it->second; | |
221 list<const BucketRanges*>::iterator ranges_list_it; | |
222 for (ranges_list_it = ranges_list->begin(); | |
223 ranges_list_it != ranges_list->end(); | |
224 ++ranges_list_it) { | |
225 output->push_back(*ranges_list_it); | |
226 } | |
227 } | |
228 } | |
229 | |
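
For reference, one plausible use of the new GetBucketRanges() accessor, e.g. from a unit test (illustrative, not part of this CL):

    std::vector<const BucketRanges*> ranges;
    base::StatisticsRecorder::GetBucketRanges(&ranges);
    for (size_t i = 0; i < ranges.size(); ++i)
      DCHECK(ranges[i]->HasValidChecksum());  // Registered ranges stay valid.
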
230 // static | |
260 Histogram* StatisticsRecorder::FindHistogram(const std::string& name) { | 231 Histogram* StatisticsRecorder::FindHistogram(const std::string& name) { |
261 if (lock_ == NULL) | 232 if (lock_ == NULL || histograms_ == NULL) |
262 return NULL; | 233 return NULL; |
263 base::AutoLock auto_lock(*lock_); | 234 base::AutoLock auto_lock(*lock_); |
264 if (!histograms_) | |
265 return NULL; | |
266 HistogramMap::iterator it = histograms_->find(name); | 235 HistogramMap::iterator it = histograms_->find(name); |
267 if (histograms_->end() == it) | 236 if (histograms_->end() == it) |
268 return NULL; | 237 return NULL; |
269 return it->second; | 238 return it->second; |
270 } | 239 } |
271 | 240 |
272 // private static | 241 // private static |
273 void StatisticsRecorder::GetSnapshot(const std::string& query, | 242 void StatisticsRecorder::GetSnapshot(const std::string& query, |
274 Histograms* snapshot) { | 243 Histograms* snapshot) { |
275 if (lock_ == NULL) | 244 if (lock_ == NULL || histograms_ == NULL) |
276 return; | 245 return; |
277 base::AutoLock auto_lock(*lock_); | 246 base::AutoLock auto_lock(*lock_); |
278 if (!histograms_) | |
279 return; | |
280 for (HistogramMap::iterator it = histograms_->begin(); | 247 for (HistogramMap::iterator it = histograms_->begin(); |
281 histograms_->end() != it; | 248 histograms_->end() != it; |
282 ++it) { | 249 ++it) { |
283 if (it->first.find(query) != std::string::npos) | 250 if (it->first.find(query) != std::string::npos) |
284 snapshot->push_back(it->second); | 251 snapshot->push_back(it->second); |
285 } | 252 } |
286 } | 253 } |
287 | 254 |
255 // This singleton instance should be started during the single threaded portion | |
256 // of main(), and hence it is not thread safe. It initializes globals to | |
257 // provide support for all future calls. | |
258 StatisticsRecorder::StatisticsRecorder() { | |
259 DCHECK(!histograms_); | |
260 if (lock_ == NULL) { | |
261 // This will leak on purpose. It's the only way to make sure we won't race | |
262 // against the static uninitialization of the module while one of our | |
263 // static methods relying on the lock get called at an inappropriate time | |
264 // during the termination phase. Since it's a static data member, we will | |
265 // leak one per process, which would be similar to the instance allocated | |
266 // during static initialization and released only on process termination. | |
267 lock_ = new base::Lock; | |
268 } | |
269 base::AutoLock auto_lock(*lock_); | |
270 histograms_ = new HistogramMap; | |
271 ranges_ = new RangesMap; | |
272 } | |
273 | |
274 StatisticsRecorder::~StatisticsRecorder() { | |
275 DCHECK(histograms_ && lock_); | |
276 | |
277 if (dump_on_exit_) { | |
278 string output; | |
279 WriteGraph("", &output); | |
280 DLOG(INFO) << output; | |
281 } | |
282 // Clean up. | |
283 HistogramMap* histograms = NULL; | |
284 { | |
285 base::AutoLock auto_lock(*lock_); | |
286 histograms = histograms_; | |
287 histograms_ = NULL; | |
288 } | |
289 RangesMap* ranges = NULL; | |
290 { | |
291 base::AutoLock auto_lock(*lock_); | |
292 ranges = ranges_; | |
293 ranges_ = NULL; | |
294 } | |
295 // We are going to leak the histograms and the ranges. | |
296 delete histograms; | |
297 delete ranges; | |
jar (doing other things)
2012/08/01 00:26:10
This was moved... but could be cleaner with smart_ptr.
kaiwang
2012/08/01 04:13:21
Done.
| |
298 // We don't delete lock_ on purpose to avoid having to properly protect | |
299 // against it going away after we checked for NULL in the static methods. | |
300 } | |
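
The destructor cleanup flagged above ("could be cleaner with smart_ptr") presumably ended up along these lines, with scoped_ptr deleting both maps implicitly after the locked scope (a sketch, not this patch set):

    StatisticsRecorder::~StatisticsRecorder() {
      DCHECK(histograms_ && ranges_ && lock_);

      if (dump_on_exit_) {
        string output;
        WriteGraph("", &output);
        DLOG(INFO) << output;
      }
      // Clean up: the scoped_ptrs delete the maps when the destructor
      // returns, i.e. after auto_lock has been released. The Histogram and
      // BucketRanges objects the maps point to are still intentionally
      // leaked (crbug.com/79322).
      scoped_ptr<HistogramMap> histograms_deleter;
      scoped_ptr<RangesMap> ranges_deleter;
      // We don't delete lock_ on purpose; see the comment in the constructor.
      {
        base::AutoLock auto_lock(*lock_);
        histograms_deleter.reset(histograms_);
        ranges_deleter.reset(ranges_);
        histograms_ = NULL;
        ranges_ = NULL;
      }
    }
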
301 | |
302 | |
288 // static | 303 // static |
289 StatisticsRecorder::HistogramMap* StatisticsRecorder::histograms_ = NULL; | 304 StatisticsRecorder::HistogramMap* StatisticsRecorder::histograms_ = NULL; |
290 // static | 305 // static |
291 StatisticsRecorder::RangesMap* StatisticsRecorder::ranges_ = NULL; | 306 StatisticsRecorder::RangesMap* StatisticsRecorder::ranges_ = NULL; |
292 // static | 307 // static |
293 base::Lock* StatisticsRecorder::lock_ = NULL; | 308 base::Lock* StatisticsRecorder::lock_ = NULL; |
294 // static | 309 // static |
295 bool StatisticsRecorder::dump_on_exit_ = false; | 310 bool StatisticsRecorder::dump_on_exit_ = false; |
296 | 311 |
297 } // namespace base | 312 } // namespace base |