OLD | NEW |
---|---|
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 // Histogram is an object that aggregates statistics, and can summarize them in | 5 // Histogram is an object that aggregates statistics, and can summarize them in |
6 // various forms, including ASCII graphical, HTML, and numerically (as a | 6 // various forms, including ASCII graphical, HTML, and numerically (as a |
7 // vector of numbers corresponding to each of the aggregating buckets). | 7 // vector of numbers corresponding to each of the aggregating buckets). |
8 // See header file for details and examples. | 8 // See header file for details and examples. |
9 | 9 |
10 #include "base/metrics/histogram.h" | 10 #include "base/metrics/histogram.h" |
(...skipping 13 matching lines...) | |
24 #include "base/synchronization/lock.h" | 24 #include "base/synchronization/lock.h" |
25 | 25 |
26 using std::string; | 26 using std::string; |
27 using std::vector; | 27 using std::vector; |
28 | 28 |
29 namespace base { | 29 namespace base { |
30 | 30 |
31 typedef HistogramBase::Count Count; | 31 typedef HistogramBase::Count Count; |
32 typedef HistogramBase::Sample Sample; | 32 typedef HistogramBase::Sample Sample; |
33 | 33 |
34 BucketHistogramSamples::BucketHistogramSamples( | |
35 const BucketRanges* bucket_ranges) | |
36 : counts_(bucket_ranges->size() - 1), | |
37 bucket_ranges_(bucket_ranges) { | |
38 CHECK_GE(bucket_ranges_->size(), 2u); | |
39 } | |
40 BucketHistogramSamples::~BucketHistogramSamples() {} | |
41 | |
42 void BucketHistogramSamples::Accumulate(Sample value, Count count) { | |
43 size_t bucket_index = GetBucketIndex(value, bucket_ranges_); | |
44 counts_[bucket_index] += count; | |
45 set_sum(sum() + count * value); | |
46 set_redundant_count(redundant_count() + count); | |
Ilya Sherman
2012/08/29 08:48:16
Optional nit: Perhaps the HistogramSamples() class
kaiwang
2012/08/29 22:42:16
Changed IncreaseSum and IncreaseRedundantCount
| |
47 } | |
48 | |
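The thread above settles on routing the sum and redundant-count updates through protected helpers rather than raw setters. Below is a minimal, self-contained sketch of that shape, assuming helpers named IncreaseSum and IncreaseRedundantCount as in the reply; the exact signatures in the landed patch set may differ.

```cpp
#include <stddef.h>
#include <stdint.h>
#include <vector>

// Sketch only: the base class owns the totals and exposes "increase" helpers,
// so subclasses never touch sum_/redundant_count_ directly.
class SamplesBaseSketch {
 public:
  SamplesBaseSketch() : sum_(0), redundant_count_(0) {}
  int64_t sum() const { return sum_; }
  int32_t redundant_count() const { return redundant_count_; }
 protected:
  void IncreaseSum(int64_t diff) { sum_ += diff; }
  void IncreaseRedundantCount(int32_t diff) { redundant_count_ += diff; }
 private:
  int64_t sum_;
  int32_t redundant_count_;
};

// Accumulate() then becomes two helper calls instead of get-then-set pairs.
class BucketSamplesSketch : public SamplesBaseSketch {
 public:
  explicit BucketSamplesSketch(size_t num_buckets) : counts_(num_buckets, 0) {}
  void Accumulate(size_t bucket_index, int32_t value, int32_t count) {
    counts_[bucket_index] += count;
    IncreaseSum(static_cast<int64_t>(count) * value);
    IncreaseRedundantCount(count);
  }
 private:
  std::vector<int32_t> counts_;
};
```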
49 Count BucketHistogramSamples::GetCount(Sample value) const { | |
50 size_t bucket_index = GetBucketIndex(value, bucket_ranges_); | |
51 return counts_[bucket_index]; | |
52 } | |
53 | |
54 Count BucketHistogramSamples::TotalCount() const { | |
55 Count count = 0; | |
56 for (size_t i = 0; i < counts_.size(); i++) { | |
57 count += counts_[i]; | |
58 } | |
59 return count; | |
60 } | |
61 | |
62 Count BucketHistogramSamples::GetCountFromBucketIndex( | |
63 size_t bucket_index) const { | |
Ilya Sherman
2012/08/29 08:48:16
nit: DCHECK that the index is in range?
kaiwang
2012/08/29 22:42:16
Done.
| |
64 return counts_[bucket_index]; | |
65 } | |
66 | |
67 scoped_ptr<SampleCountIterator> BucketHistogramSamples::Iterator() const { | |
68 return scoped_ptr<SampleCountIterator>( | |
69 new BucketHistogramSamplesIterator(&counts_, bucket_ranges_)); | |
70 } | |
71 | |
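For context on how this factory is meant to be used, here is an editorial usage sketch built only from the SampleCountIterator surface visible in this file (Done(), Get(), Next()); |samples| is assumed to be a BucketHistogramSamples, and the logging call is purely illustrative.

```cpp
// Walk every non-empty bucket and print its range and count.
scoped_ptr<SampleCountIterator> it = samples.Iterator();
while (!it->Done()) {
  HistogramBase::Sample min;
  HistogramBase::Sample max;
  HistogramBase::Count count;
  it->Get(&min, &max, &count);   // Bucket [min, max) currently holds |count|.
  DVLOG(1) << "[" << min << ", " << max << ") -> " << count;
  it->Next();                    // Advances to the next non-empty bucket.
}
```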
72 // Use simple binary search. This is very general, but there are better | |
73 // approaches if we knew that the buckets were linearly distributed. | |
74 // | |
75 // static | |
76 size_t BucketHistogramSamples::GetBucketIndex( | |
77 Sample value, const BucketRanges* bucket_ranges) { | |
78 size_t bucket_count = bucket_ranges->size() - 1; | |
79 CHECK_GE(bucket_count, 1u); | |
80 CHECK_GE(value, bucket_ranges->range(0)); | |
81 CHECK_LT(value, bucket_ranges->range(bucket_count)); | |
82 | |
83 size_t under = 0; | |
84 size_t over = bucket_count; | |
85 size_t mid; | |
86 do { | |
87 mid = under + (over - under)/2; | |
88 if (mid == under) | |
89 break; | |
90 if (bucket_ranges->range(mid) <= value) | |
91 under = mid; | |
92 else | |
93 over = mid; | |
94 } while (true); | |
95 return mid; | |
96 } | |
97 | |
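To make the search concrete, here is a stand-alone copy of the same lower-bound loop over a plain vector of boundaries, together with a worked example (editorial, not part of the patch).

```cpp
#include <assert.h>
#include <stddef.h>
#include <vector>

// Bucket i covers [ranges[i], ranges[i + 1]); |ranges| has bucket_count + 1
// entries, the last one acting as the exclusive upper bound.
size_t FindBucket(int value, const std::vector<int>& ranges) {
  size_t under = 0;
  size_t over = ranges.size() - 1;  // == bucket_count
  size_t mid;
  do {
    mid = under + (over - under) / 2;
    if (mid == under)
      break;                   // Narrowed down to a single bucket.
    if (ranges[mid] <= value)
      under = mid;             // |value| lies at or right of boundary |mid|.
    else
      over = mid;              // |value| lies left of boundary |mid|.
  } while (true);
  return mid;
}

int main() {
  // Boundaries 0, 1, 5, 10, 50 define buckets [0,1) [1,5) [5,10) [10,50).
  const int kBoundaries[] = { 0, 1, 5, 10, 50 };
  std::vector<int> ranges(kBoundaries, kBoundaries + 5);
  assert(FindBucket(0, ranges) == 0);
  assert(FindBucket(7, ranges) == 2);   // 5 <= 7 < 10
  assert(FindBucket(49, ranges) == 3);
  return 0;
}
```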
98 bool BucketHistogramSamples::AddSubtractImpl(SampleCountIterator* iter, | |
99 bool is_add) { | |
100 HistogramBase::Sample min; | |
101 HistogramBase::Sample max; | |
102 HistogramBase::Count count; | |
103 | |
104 // Go through the iterator and add the counts into the correct buckets. | |
Ilya Sherman
2012/08/29 08:48:16
The loop has three cases. Please describe each of
kaiwang
2012/08/29 22:42:16
Done.
| |
105 size_t index = 0; | |
106 while (index < counts_.size() && !iter->Done()) { | |
107 iter->Get(&min, &max, &count); | |
108 if (min == bucket_ranges_->range(index) && | |
109 max == bucket_ranges_->range(index + 1)) { | |
110 // Bucket match! | |
111 counts_[index] += is_add ? count : -count; | |
112 iter->Next(); | |
113 } else if (min > bucket_ranges_->range(index)) { | |
114 index++; | |
115 } else { | |
116 return false; | |
117 } | |
118 } | |
119 | |
120 return iter->Done(); | |
121 } | |
122 | |
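Following up on the request above, here are the three cases of the merge loop spelled out; the wording is editorial, and the comment that actually landed in a later patch set may read differently.

```cpp
  while (index < counts_.size() && !iter->Done()) {
    iter->Get(&min, &max, &count);
    if (min == bucket_ranges_->range(index) &&
        max == bucket_ranges_->range(index + 1)) {
      // Case 1: the incoming bucket lines up exactly with local bucket
      // |index| -- fold its count in and advance the iterator.
      counts_[index] += is_add ? count : -count;
      iter->Next();
    } else if (min > bucket_ranges_->range(index)) {
      // Case 2: the incoming bucket starts further right -- local bucket
      // |index| receives nothing, so just move on to the next local bucket.
      index++;
    } else {
      // Case 3: the boundaries straddle each other -- the two bucket layouts
      // are incompatible, so the whole add/subtract fails.
      return false;
    }
  }
```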
123 BucketHistogramSamplesIterator::BucketHistogramSamplesIterator( | |
124 const vector<Count>* counts, | |
125 const BucketRanges* bucket_ranges) | |
126 : counts_(counts), | |
127 bucket_ranges_(bucket_ranges), | |
128 index_(-1), | |
Ilya Sherman
2012/08/29 08:48:16
nit: It looks like the code would be slightly clea
kaiwang
2012/08/29 22:42:16
Need a way to tell it's on first bucket (index ==
Ilya Sherman
2012/08/29 23:41:27
Can't you tell this by the check "index_ < counts_
kaiwang
2012/08/30 03:13:22
even when counts_ has a large size, it's still pos
Ilya Sherman
2012/08/30 07:57:39
But, the constructor immediately scans over to the
kaiwang
2012/09/07 01:14:11
Changed according to your suggestion. (note, I'll
| |
129 is_done_(false) { | |
130 CHECK_GT(bucket_ranges_->size(), counts_->size()); | |
131 ForwardToNextNonemptyBucket(); | |
132 } | |
133 | |
134 bool BucketHistogramSamplesIterator::Done() { | |
135 return is_done_; | |
Ilya Sherman
2012/08/30 07:57:39
Rather than keeping a separate variable for this,
| |
136 } | |
137 | |
138 void BucketHistogramSamplesIterator::Next() { | |
139 ForwardToNextNonemptyBucket(); | |
140 } | |
141 | |
142 void BucketHistogramSamplesIterator::Get(HistogramBase::Sample* min, | |
143 HistogramBase::Sample* max, | |
144 HistogramBase::Count* count) { | |
145 CHECK(!Done()); | |
146 if (min != NULL) | |
147 *min = bucket_ranges_->range(index_); | |
148 if (max != NULL) | |
149 *max = bucket_ranges_->range(index_ + 1); | |
150 if (count != NULL) | |
151 *count = (*counts_)[index_]; | |
152 } | |
153 | |
154 void BucketHistogramSamplesIterator::ForwardToNextNonemptyBucket() { | |
155 CHECK(!Done()); | |
Ilya Sherman
2012/08/30 07:57:40
This should probably be part of the public method
| |
156 | |
157 while (index_ < static_cast<int>(counts_->size()) - 1) { | |
158 index_++; | |
159 if ((*counts_)[index_] != 0) | |
160 return; | |
161 } | |
162 is_done_ = true; | |
163 } | |
164 | |
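The threads above converge on dropping is_done_ and deriving Done() from index_ (which can then start at 0 as an unsigned index). A sketch of how that might look, offered as an assumption about the follow-up patch set rather than the landed code:

```cpp
BucketHistogramSamplesIterator::BucketHistogramSamplesIterator(
    const vector<Count>* counts,
    const BucketRanges* bucket_ranges)
    : counts_(counts),
      bucket_ranges_(bucket_ranges),
      index_(0) {  // index_ becomes a size_t; no -1 sentinel needed.
  CHECK_GT(bucket_ranges_->size(), counts_->size());
  SkipEmptyBuckets();  // Land on the first non-empty bucket, if any.
}

bool BucketHistogramSamplesIterator::Done() {
  return index_ >= counts_->size();
}

void BucketHistogramSamplesIterator::Next() {
  DCHECK(!Done());
  index_++;
  SkipEmptyBuckets();
}

// Private helper; the public entry points do the Done() checking.
void BucketHistogramSamplesIterator::SkipEmptyBuckets() {
  while (!Done() && (*counts_)[index_] == 0)
    index_++;
}
```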
34 // static | 165 // static |
35 const size_t Histogram::kBucketCount_MAX = 16384u; | 166 const size_t Histogram::kBucketCount_MAX = 16384u; |
36 | 167 |
37 Histogram::SampleSet::SampleSet(size_t size) | |
38 : counts_(size, 0), | |
39 sum_(0), | |
40 redundant_count_(0) {} | |
41 | |
42 Histogram::SampleSet::SampleSet() | |
43 : counts_(), | |
44 sum_(0), | |
45 redundant_count_(0) {} | |
46 | |
47 Histogram::SampleSet::~SampleSet() {} | |
48 | |
49 void Histogram::SampleSet::Resize(size_t size) { | |
50 counts_.resize(size, 0); | |
51 } | |
52 | |
53 void Histogram::SampleSet::Accumulate(Sample value, Count count, | |
54 size_t index) { | |
55 DCHECK(count == 1 || count == -1); | |
56 counts_[index] += count; | |
57 sum_ += count * value; | |
58 redundant_count_ += count; | |
59 DCHECK_GE(counts_[index], 0); | |
60 DCHECK_GE(sum_, 0); | |
61 DCHECK_GE(redundant_count_, 0); | |
62 } | |
63 | |
64 Count Histogram::SampleSet::TotalCount() const { | |
65 Count total = 0; | |
66 for (Counts::const_iterator it = counts_.begin(); | |
67 it != counts_.end(); | |
68 ++it) { | |
69 total += *it; | |
70 } | |
71 return total; | |
72 } | |
73 | |
74 void Histogram::SampleSet::Add(const SampleSet& other) { | |
75 DCHECK_EQ(counts_.size(), other.counts_.size()); | |
76 sum_ += other.sum_; | |
77 redundant_count_ += other.redundant_count_; | |
78 for (size_t index = 0; index < counts_.size(); ++index) | |
79 counts_[index] += other.counts_[index]; | |
80 } | |
81 | |
82 void Histogram::SampleSet::Subtract(const SampleSet& other) { | |
83 DCHECK_EQ(counts_.size(), other.counts_.size()); | |
84 // Note: Race conditions in snapshotting a sum may lead to (temporary) | |
85 // negative values when snapshots are later combined (and deltas calculated). | |
86 // As a result, we don't currently CHECK() for positive values. | |
87 sum_ -= other.sum_; | |
88 redundant_count_ -= other.redundant_count_; | |
89 for (size_t index = 0; index < counts_.size(); ++index) { | |
90 counts_[index] -= other.counts_[index]; | |
91 DCHECK_GE(counts_[index], 0); | |
92 } | |
93 } | |
94 | |
95 bool Histogram::SampleSet::Serialize(Pickle* pickle) const { | |
96 pickle->WriteInt64(sum_); | |
97 pickle->WriteInt64(redundant_count_); | |
98 pickle->WriteUInt64(counts_.size()); | |
99 | |
100 for (size_t index = 0; index < counts_.size(); ++index) { | |
101 pickle->WriteInt(counts_[index]); | |
102 } | |
103 | |
104 return true; | |
105 } | |
106 | |
107 bool Histogram::SampleSet::Deserialize(PickleIterator* iter) { | |
108 DCHECK_EQ(counts_.size(), 0u); | |
109 DCHECK_EQ(sum_, 0); | |
110 DCHECK_EQ(redundant_count_, 0); | |
111 | |
112 uint64 counts_size; | |
113 | |
114 if (!iter->ReadInt64(&sum_) || | |
115 !iter->ReadInt64(&redundant_count_) || | |
116 !iter->ReadUInt64(&counts_size)) { | |
117 return false; | |
118 } | |
119 | |
120 if (counts_size == 0) | |
121 return false; | |
122 | |
123 int count = 0; | |
124 for (uint64 index = 0; index < counts_size; ++index) { | |
125 int i; | |
126 if (!iter->ReadInt(&i)) | |
127 return false; | |
128 counts_.push_back(i); | |
129 count += i; | |
130 } | |
131 DCHECK_EQ(count, redundant_count_); | |
132 return count == redundant_count_; | |
133 } | |
134 | |
135 // TODO(rtenneti): delete this code after debugging. | 168 // TODO(rtenneti): delete this code after debugging. |
136 void CheckCorruption(const Histogram& histogram, bool new_histogram) { | 169 void CheckCorruption(const Histogram& histogram, bool new_histogram) { |
137 const std::string& histogram_name = histogram.histogram_name(); | 170 const std::string& histogram_name = histogram.histogram_name(); |
138 char histogram_name_buf[128]; | 171 char histogram_name_buf[128]; |
139 base::strlcpy(histogram_name_buf, | 172 base::strlcpy(histogram_name_buf, |
140 histogram_name.c_str(), | 173 histogram_name.c_str(), |
141 arraysize(histogram_name_buf)); | 174 arraysize(histogram_name_buf)); |
142 base::debug::Alias(histogram_name_buf); | 175 base::debug::Alias(histogram_name_buf); |
143 | 176 |
144 bool debug_new_histogram[1]; | 177 bool debug_new_histogram[1]; |
(...skipping 93 matching lines...) | |
238 if (next > current) | 271 if (next > current) |
239 current = next; | 272 current = next; |
240 else | 273 else |
241 ++current; // Just do a narrow bucket, and keep trying. | 274 ++current; // Just do a narrow bucket, and keep trying. |
242 ranges->set_range(bucket_index, current); | 275 ranges->set_range(bucket_index, current); |
243 } | 276 } |
244 ranges->set_range(ranges->size() - 1, HistogramBase::kSampleType_MAX); | 277 ranges->set_range(ranges->size() - 1, HistogramBase::kSampleType_MAX); |
245 ranges->ResetChecksum(); | 278 ranges->ResetChecksum(); |
246 } | 279 } |
247 | 280 |
248 // static | |
249 void Histogram::Add(int value) { | 281 void Histogram::Add(int value) { |
250 if (value > kSampleType_MAX - 1) | 282 if (value > ranges(bucket_count_) - 1) |
251 value = kSampleType_MAX - 1; | 283 value = ranges(bucket_count_) - 1; |
252 if (value < 0) | 284 if (value < ranges(0)) |
253 value = 0; | 285 value = ranges(0); |
254 size_t index = BucketIndex(value); | 286 samples_->Accumulate(value, 1); |
255 DCHECK_GE(value, ranges(index)); | |
256 DCHECK_LT(value, ranges(index + 1)); | |
257 Accumulate(value, 1, index); | |
258 } | 287 } |
259 | 288 |
260 void Histogram::AddBoolean(bool value) { | 289 void Histogram::AddBoolean(bool value) { |
261 DCHECK(false); | 290 DCHECK(false); |
262 } | 291 } |
263 | 292 |
264 void Histogram::AddSampleSet(const SampleSet& sample) { | 293 void Histogram::AddSamples(const HistogramSamples& samples) { |
265 sample_.Add(sample); | 294 samples_->Add(samples); |
295 } | |
296 | |
297 bool Histogram::AddSamples(PickleIterator* iter) { | |
298 return samples_->AddFromPickle(iter); | |
266 } | 299 } |
267 | 300 |
268 void Histogram::SetRangeDescriptions(const DescriptionPair descriptions[]) { | 301 void Histogram::SetRangeDescriptions(const DescriptionPair descriptions[]) { |
269 DCHECK(false); | 302 DCHECK(false); |
270 } | 303 } |
271 | 304 |
272 // The following methods provide a graphical histogram display. | 305 // The following methods provide a graphical histogram display. |
273 void Histogram::WriteHTMLGraph(string* output) const { | 306 void Histogram::WriteHTMLGraph(string* output) const { |
274 // TBD(jar) Write a nice HTML bar chart, with divs and mouse-overs etc. | 307 // TBD(jar) Write a nice HTML bar chart, with divs and mouse-overs etc. |
275 output->append("<PRE>"); | 308 output->append("<PRE>"); |
276 WriteAsciiImpl(true, "<br>", output); | 309 WriteAsciiImpl(true, "<br>", output); |
277 output->append("</PRE>"); | 310 output->append("</PRE>"); |
278 } | 311 } |
279 | 312 |
280 void Histogram::WriteAscii(string* output) const { | 313 void Histogram::WriteAscii(string* output) const { |
281 WriteAsciiImpl(true, "\n", output); | 314 WriteAsciiImpl(true, "\n", output); |
282 } | 315 } |
283 | 316 |
284 // static | 317 // static |
285 string Histogram::SerializeHistogramInfo(const Histogram& histogram, | 318 string Histogram::SerializeHistogramInfo(const Histogram& histogram, |
286 const SampleSet& snapshot) { | 319 const HistogramSamples& snapshot) { |
287 DCHECK_NE(NOT_VALID_IN_RENDERER, histogram.histogram_type()); | 320 DCHECK_NE(NOT_VALID_IN_RENDERER, histogram.histogram_type()); |
288 DCHECK(histogram.bucket_ranges()->HasValidChecksum()); | 321 DCHECK(histogram.bucket_ranges()->HasValidChecksum()); |
289 | 322 |
290 Pickle pickle; | 323 Pickle pickle; |
291 pickle.WriteString(histogram.histogram_name()); | 324 pickle.WriteString(histogram.histogram_name()); |
292 pickle.WriteInt(histogram.declared_min()); | 325 pickle.WriteInt(histogram.declared_min()); |
293 pickle.WriteInt(histogram.declared_max()); | 326 pickle.WriteInt(histogram.declared_max()); |
294 pickle.WriteUInt64(histogram.bucket_count()); | 327 pickle.WriteUInt64(histogram.bucket_count()); |
295 pickle.WriteUInt32(histogram.bucket_ranges()->checksum()); | 328 pickle.WriteUInt32(histogram.bucket_ranges()->checksum()); |
296 pickle.WriteInt(histogram.histogram_type()); | 329 pickle.WriteInt(histogram.histogram_type()); |
297 pickle.WriteInt(histogram.flags()); | 330 pickle.WriteInt(histogram.flags()); |
298 | 331 |
332 histogram.SerializeRanges(&pickle); | |
333 | |
299 snapshot.Serialize(&pickle); | 334 snapshot.Serialize(&pickle); |
300 | 335 |
301 histogram.SerializeRanges(&pickle); | |
302 | |
303 return string(static_cast<const char*>(pickle.data()), pickle.size()); | 336 return string(static_cast<const char*>(pickle.data()), pickle.size()); |
304 } | 337 } |
305 | 338 |
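For readers tracking the reordering above: the samples are now the last thing written, after the ranges, so the receiving side can rebuild the histogram (including any custom ranges) before AddSamples() consumes the remainder of the same PickleIterator. The layout below is derived from the visible code, not from separate documentation.

```cpp
// Pickle layout written by SerializeHistogramInfo() after this change:
//   string   histogram_name
//   int      declared_min
//   int      declared_max
//   uint64   bucket_count
//   uint32   bucket_ranges()->checksum()
//   int      histogram_type
//   int      flags
//   ...      SerializeRanges()     (a no-op for plain Histogram)
//   ...      snapshot.Serialize()  (sample data, read back via AddSamples())
```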
306 // static | 339 // static |
307 bool Histogram::DeserializeHistogramInfo(const string& histogram_info) { | 340 bool Histogram::DeserializeHistogramInfo(const string& histogram_info) { |
308 if (histogram_info.empty()) { | 341 if (histogram_info.empty()) { |
309 return false; | 342 return false; |
310 } | 343 } |
311 | 344 |
312 Pickle pickle(histogram_info.data(), | 345 Pickle pickle(histogram_info.data(), |
313 static_cast<int>(histogram_info.size())); | 346 static_cast<int>(histogram_info.size())); |
314 string histogram_name; | 347 string histogram_name; |
315 int declared_min; | 348 int declared_min; |
316 int declared_max; | 349 int declared_max; |
317 uint64 bucket_count; | 350 uint64 bucket_count; |
318 uint32 range_checksum; | 351 uint32 range_checksum; |
319 int histogram_type; | 352 int histogram_type; |
320 int pickle_flags; | 353 int pickle_flags; |
321 SampleSet sample; | |
322 | 354 |
323 PickleIterator iter(pickle); | 355 PickleIterator iter(pickle); |
324 if (!iter.ReadString(&histogram_name) || | 356 if (!iter.ReadString(&histogram_name) || |
325 !iter.ReadInt(&declared_min) || | 357 !iter.ReadInt(&declared_min) || |
326 !iter.ReadInt(&declared_max) || | 358 !iter.ReadInt(&declared_max) || |
327 !iter.ReadUInt64(&bucket_count) || | 359 !iter.ReadUInt64(&bucket_count) || |
328 !iter.ReadUInt32(&range_checksum) || | 360 !iter.ReadUInt32(&range_checksum) || |
329 !iter.ReadInt(&histogram_type) || | 361 !iter.ReadInt(&histogram_type) || |
330 !iter.ReadInt(&pickle_flags) || | 362 !iter.ReadInt(&pickle_flags)) { |
331 !sample.Histogram::SampleSet::Deserialize(&iter)) { | |
332 DLOG(ERROR) << "Pickle error decoding Histogram: " << histogram_name; | 363 DLOG(ERROR) << "Pickle error decoding Histogram: " << histogram_name; |
333 return false; | 364 return false; |
334 } | 365 } |
335 | 366 |
336 DCHECK(pickle_flags & kIPCSerializationSourceFlag); | 367 DCHECK(pickle_flags & kIPCSerializationSourceFlag); |
337 // Since these fields may have come from an untrusted renderer, do additional | 368 // Since these fields may have come from an untrusted renderer, do additional |
338 // checks above and beyond those in Histogram::Initialize() | 369 // checks above and beyond those in Histogram::Initialize() |
339 if (declared_max <= 0 || declared_min <= 0 || declared_max < declared_min || | 370 if (declared_max <= 0 || declared_min <= 0 || declared_max < declared_min || |
340 INT_MAX / sizeof(Count) <= bucket_count || bucket_count < 2) { | 371 INT_MAX / sizeof(Count) <= bucket_count || bucket_count < 2) { |
341 DLOG(ERROR) << "Values error decoding Histogram: " << histogram_name; | 372 DLOG(ERROR) << "Values error decoding Histogram: " << histogram_name; |
(...skipping 33 matching lines...) | |
375 DCHECK_EQ(render_histogram->bucket_count(), bucket_count); | 406 DCHECK_EQ(render_histogram->bucket_count(), bucket_count); |
376 DCHECK_EQ(render_histogram->histogram_type(), histogram_type); | 407 DCHECK_EQ(render_histogram->histogram_type(), histogram_type); |
377 | 408 |
378 if (render_histogram->bucket_ranges()->checksum() != range_checksum) { | 409 if (render_histogram->bucket_ranges()->checksum() != range_checksum) { |
379 return false; | 410 return false; |
380 } | 411 } |
381 | 412 |
382 if (render_histogram->flags() & kIPCSerializationSourceFlag) { | 413 if (render_histogram->flags() & kIPCSerializationSourceFlag) { |
383 DVLOG(1) << "Single process mode, histogram observed and not copied: " | 414 DVLOG(1) << "Single process mode, histogram observed and not copied: " |
384 << histogram_name; | 415 << histogram_name; |
385 } else { | 416 return true; |
386 DCHECK_EQ(flags & render_histogram->flags(), flags); | |
387 render_histogram->AddSampleSet(sample); | |
388 } | 417 } |
389 | 418 |
390 return true; | 419 DCHECK_EQ(flags & render_histogram->flags(), flags); |
420 return render_histogram->AddSamples(&iter); | |
Ilya Sherman
2012/08/29 08:48:16
Should failing to deserialize the Pickle trigger a
kaiwang
2012/08/29 22:42:16
We don't want to crash browser when some (corrupte
| |
391 } | 421 } |
392 | 422 |
423 // static | |
424 const int Histogram::kCommonRaceBasedCountMismatch = 5; | |
393 | 425 |
394 // Validate a sample and related histogram. | |
395 Histogram::Inconsistencies Histogram::FindCorruption( | 426 Histogram::Inconsistencies Histogram::FindCorruption( |
396 const SampleSet& snapshot) const { | 427 const HistogramSamples& samples) const { |
397 int inconsistencies = NO_INCONSISTENCIES; | 428 int inconsistencies = NO_INCONSISTENCIES; |
398 Sample previous_range = -1; // Bottom range is always 0. | 429 Sample previous_range = -1; // Bottom range is always 0. |
399 int64 count = 0; | |
400 for (size_t index = 0; index < bucket_count(); ++index) { | 430 for (size_t index = 0; index < bucket_count(); ++index) { |
401 count += snapshot.counts(index); | |
402 int new_range = ranges(index); | 431 int new_range = ranges(index); |
403 if (previous_range >= new_range) | 432 if (previous_range >= new_range) |
404 inconsistencies |= BUCKET_ORDER_ERROR; | 433 inconsistencies |= BUCKET_ORDER_ERROR; |
405 previous_range = new_range; | 434 previous_range = new_range; |
406 } | 435 } |
407 | 436 |
408 if (!bucket_ranges()->HasValidChecksum()) | 437 if (!bucket_ranges()->HasValidChecksum()) |
409 inconsistencies |= RANGE_CHECKSUM_ERROR; | 438 inconsistencies |= RANGE_CHECKSUM_ERROR; |
410 | 439 |
411 int64 delta64 = snapshot.redundant_count() - count; | 440 int64 delta64 = samples.redundant_count() - samples.TotalCount(); |
412 if (delta64 != 0) { | 441 if (delta64 != 0) { |
413 int delta = static_cast<int>(delta64); | 442 int delta = static_cast<int>(delta64); |
414 if (delta != delta64) | 443 if (delta != delta64) |
415 delta = INT_MAX; // Flag all giant errors as INT_MAX. | 444 delta = INT_MAX; // Flag all giant errors as INT_MAX. |
416 // Since snapshots of histograms are taken asynchronously relative to | |
417 // sampling (and snapped from different threads), it is pretty likely that | |
418 // we'll catch a redundant count that doesn't match the sample count. We | |
419 // allow for a certain amount of slop before flagging this as an | |
420 // inconsistency. Even with an inconsistency, we'll snapshot it again (for | |
421 // UMA in about a half hour, so we'll eventually get the data, if it was | |
422 // not the result of a corruption. If histograms show that 1 is "too tight" | |
423 // then we may try to use 2 or 3 for this slop value. | |
424 const int kCommonRaceBasedCountMismatch = 5; | |
425 if (delta > 0) { | 445 if (delta > 0) { |
426 UMA_HISTOGRAM_COUNTS("Histogram.InconsistentCountHigh", delta); | 446 UMA_HISTOGRAM_COUNTS("Histogram.InconsistentCountHigh", delta); |
427 if (delta > kCommonRaceBasedCountMismatch) | 447 if (delta > kCommonRaceBasedCountMismatch) |
428 inconsistencies |= COUNT_HIGH_ERROR; | 448 inconsistencies |= COUNT_HIGH_ERROR; |
429 } else { | 449 } else { |
430 DCHECK_GT(0, delta); | 450 DCHECK_GT(0, delta); |
431 UMA_HISTOGRAM_COUNTS("Histogram.InconsistentCountLow", -delta); | 451 UMA_HISTOGRAM_COUNTS("Histogram.InconsistentCountLow", -delta); |
432 if (-delta > kCommonRaceBasedCountMismatch) | 452 if (-delta > kCommonRaceBasedCountMismatch) |
433 inconsistencies |= COUNT_LOW_ERROR; | 453 inconsistencies |= COUNT_LOW_ERROR; |
434 } | 454 } |
435 } | 455 } |
436 return static_cast<Inconsistencies>(inconsistencies); | 456 return static_cast<Inconsistencies>(inconsistencies); |
437 } | 457 } |
438 | 458 |
439 Histogram::ClassType Histogram::histogram_type() const { | 459 Histogram::ClassType Histogram::histogram_type() const { |
440 return HISTOGRAM; | 460 return HISTOGRAM; |
441 } | 461 } |
442 | 462 |
443 Sample Histogram::ranges(size_t i) const { | 463 Sample Histogram::ranges(size_t i) const { |
444 return bucket_ranges_->range(i); | 464 return bucket_ranges_->range(i); |
445 } | 465 } |
446 | 466 |
447 size_t Histogram::bucket_count() const { | 467 size_t Histogram::bucket_count() const { |
448 return bucket_count_; | 468 return bucket_count_; |
449 } | 469 } |
450 | 470 |
451 // Do a safe atomic snapshot of sample data. | 471 scoped_ptr<BucketHistogramSamples> Histogram::SnapshotSamples() const { |
452 // This implementation assumes we are on a safe single thread. | 472 scoped_ptr<BucketHistogramSamples> samples( |
453 void Histogram::SnapshotSample(SampleSet* sample) const { | 473 new BucketHistogramSamples(bucket_ranges())); |
454 // Note locking not done in this version!!! | 474 samples->Add(*samples_); |
Ilya Sherman
2012/08/29 08:48:16
nit: Any reason not to preserve this comment?
kaiwang
2012/08/29 22:42:16
Lock is never used in Histograms and there's no pl
| |
455 *sample = sample_; | 475 return samples.Pass(); |
456 } | 476 } |
457 | 477 |
458 bool Histogram::HasConstructionArguments(Sample minimum, | 478 bool Histogram::HasConstructionArguments(Sample minimum, |
459 Sample maximum, | 479 Sample maximum, |
460 size_t bucket_count) { | 480 size_t bucket_count) { |
461 return ((minimum == declared_min_) && (maximum == declared_max_) && | 481 return ((minimum == declared_min_) && (maximum == declared_max_) && |
462 (bucket_count == bucket_count_)); | 482 (bucket_count == bucket_count_)); |
463 } | 483 } |
464 | 484 |
465 Histogram::Histogram(const string& name, | 485 Histogram::Histogram(const string& name, |
466 Sample minimum, | 486 Sample minimum, |
467 Sample maximum, | 487 Sample maximum, |
468 size_t bucket_count, | 488 size_t bucket_count, |
469 const BucketRanges* ranges) | 489 const BucketRanges* ranges) |
470 : HistogramBase(name), | 490 : HistogramBase(name), |
471 bucket_ranges_(ranges), | 491 bucket_ranges_(ranges), |
472 declared_min_(minimum), | 492 declared_min_(minimum), |
473 declared_max_(maximum), | 493 declared_max_(maximum), |
474 bucket_count_(bucket_count), | 494 bucket_count_(bucket_count) { |
475 sample_(bucket_count) {} | 495 if (ranges) |
496 samples_.reset(new BucketHistogramSamples(ranges)); | |
497 } | |
476 | 498 |
477 Histogram::~Histogram() { | 499 Histogram::~Histogram() { |
478 if (StatisticsRecorder::dump_on_exit()) { | 500 if (StatisticsRecorder::dump_on_exit()) { |
479 string output; | 501 string output; |
480 WriteAsciiImpl(true, "\n", &output); | 502 WriteAsciiImpl(true, "\n", &output); |
481 DLOG(INFO) << output; | 503 DLOG(INFO) << output; |
482 } | 504 } |
483 } | 505 } |
484 | 506 |
485 // static | 507 // static |
(...skipping 26 matching lines...) | |
512 } | 534 } |
513 | 535 |
514 bool Histogram::SerializeRanges(Pickle* pickle) const { | 536 bool Histogram::SerializeRanges(Pickle* pickle) const { |
515 return true; | 537 return true; |
516 } | 538 } |
517 | 539 |
518 bool Histogram::PrintEmptyBucket(size_t index) const { | 540 bool Histogram::PrintEmptyBucket(size_t index) const { |
519 return true; | 541 return true; |
520 } | 542 } |
521 | 543 |
522 size_t Histogram::BucketIndex(Sample value) const { | |
523 // Use simple binary search. This is very general, but there are better | |
524 // approaches if we knew that the buckets were linearly distributed. | |
525 DCHECK_LE(ranges(0), value); | |
526 DCHECK_GT(ranges(bucket_count()), value); | |
527 size_t under = 0; | |
528 size_t over = bucket_count(); | |
529 size_t mid; | |
530 | |
531 do { | |
532 DCHECK_GE(over, under); | |
Ilya Sherman
2012/08/29 08:48:16
Why did you remove this DCHECK?
kaiwang
2012/09/07 01:14:11
Done.
| |
533 mid = under + (over - under)/2; | |
534 if (mid == under) | |
535 break; | |
536 if (ranges(mid) <= value) | |
537 under = mid; | |
538 else | |
539 over = mid; | |
540 } while (true); | |
541 | |
542 DCHECK_LE(ranges(mid), value); | |
543 CHECK_GT(ranges(mid+1), value); | |
Ilya Sherman
2012/08/29 08:48:16
Why did you remove these checks?
kaiwang
2012/08/29 22:42:16
For above checks, they are guaranteed by current c
Ilya Sherman
2012/08/29 23:41:27
The purpose of DCHECKs, among other things, is to
kaiwang
2012/08/30 03:13:22
A lot of DCHECKs actually reduce the code readabil
Ilya Sherman
2012/08/30 07:57:39
I agree that some DCHECKs are unhelpful, but I dis
kaiwang
2012/09/07 01:14:11
Done.
| |
544 return mid; | |
545 } | |
546 | |
547 // Use the actual bucket widths (like a linear histogram) until the widths get | 544 // Use the actual bucket widths (like a linear histogram) until the widths get |
548 // over some transition value, and then use that transition width. Exponentials | 545 // over some transition value, and then use that transition width. Exponentials |
549 // get so big so fast (and we don't expect to see a lot of entries in the large | 546 // get so big so fast (and we don't expect to see a lot of entries in the large |
550 // buckets), so we need this to make it possible to see what is going on and | 547 // buckets), so we need this to make it possible to see what is going on and |
551 // not have 0-graphical-height buckets. | 548 // not have 0-graphical-height buckets. |
552 double Histogram::GetBucketSize(Count current, size_t i) const { | 549 double Histogram::GetBucketSize(Count current, size_t i) const { |
553 DCHECK_GT(ranges(i + 1), ranges(i)); | 550 DCHECK_GT(ranges(i + 1), ranges(i)); |
554 static const double kTransitionWidth = 5; | 551 static const double kTransitionWidth = 5; |
555 double denominator = ranges(i + 1) - ranges(i); | 552 double denominator = ranges(i + 1) - ranges(i); |
556 if (denominator > kTransitionWidth) | 553 if (denominator > kTransitionWidth) |
557 denominator = kTransitionWidth; // Stop trying to normalize. | 554 denominator = kTransitionWidth; // Stop trying to normalize. |
558 return current/denominator; | 555 return current/denominator; |
559 } | 556 } |
560 | 557 |
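A quick numeric illustration of the normalization just described (editorial example; the bucket boundaries and counts are made up):

```cpp
// With kTransitionWidth = 5:
//   bucket [10, 12)   holding 8 samples  ->  8 / 2 = 4.0   (real width used)
//   bucket [12, 16)   holding 8 samples  ->  8 / 4 = 2.0
//   bucket [100, 200) holding 8 samples  ->  8 / 5 = 1.6   (width capped at 5)
// Without the cap the wide bucket would plot as 8 / 100 = 0.08 and all but
// vanish from the ASCII graph despite holding just as many samples.
```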
561 const string Histogram::GetAsciiBucketRange(size_t i) const { | 558 const string Histogram::GetAsciiBucketRange(size_t i) const { |
562 string result; | 559 string result; |
563 if (kHexRangePrintingFlag & flags()) | 560 if (kHexRangePrintingFlag & flags()) |
564 StringAppendF(&result, "%#x", ranges(i)); | 561 StringAppendF(&result, "%#x", ranges(i)); |
565 else | 562 else |
566 StringAppendF(&result, "%d", ranges(i)); | 563 StringAppendF(&result, "%d", ranges(i)); |
567 return result; | 564 return result; |
568 } | 565 } |
569 | 566 |
570 // Update histogram data with new sample. | |
571 void Histogram::Accumulate(Sample value, Count count, size_t index) { | |
572 // Note locking not done in this version!!! | |
573 sample_.Accumulate(value, count, index); | |
574 } | |
575 | |
576 //------------------------------------------------------------------------------ | 567 //------------------------------------------------------------------------------ |
577 // Private methods | 568 // Private methods |
578 | 569 |
579 void Histogram::WriteAsciiImpl(bool graph_it, | 570 void Histogram::WriteAsciiImpl(bool graph_it, |
580 const string& newline, | 571 const string& newline, |
581 string* output) const { | 572 string* output) const { |
582 // Get local (stack) copies of all effectively volatile class data so that we | 573 // Get local (stack) copies of all effectively volatile class data so that we |
583 // are consistent across our output activities. | 574 // are consistent across our output activities. |
584 SampleSet snapshot; | 575 scoped_ptr<BucketHistogramSamples> snapshot = SnapshotSamples(); |
585 SnapshotSample(&snapshot); | 576 Count sample_count = snapshot->TotalCount(); |
586 Count sample_count = snapshot.TotalCount(); | |
587 | 577 |
588 WriteAsciiHeader(snapshot, sample_count, output); | 578 WriteAsciiHeader(*snapshot, sample_count, output); |
589 output->append(newline); | 579 output->append(newline); |
590 | 580 |
591 // Prepare to normalize graphical rendering of bucket contents. | 581 // Prepare to normalize graphical rendering of bucket contents. |
592 double max_size = 0; | 582 double max_size = 0; |
593 if (graph_it) | 583 if (graph_it) |
594 max_size = GetPeakBucketSize(snapshot); | 584 max_size = GetPeakBucketSize(*snapshot); |
595 | 585 |
596 // Calculate space needed to print bucket range numbers. Leave room to print | 586 // Calculate space needed to print bucket range numbers. Leave room to print |
597 // nearly the largest bucket range without sliding over the histogram. | 587 // nearly the largest bucket range without sliding over the histogram. |
598 size_t largest_non_empty_bucket = bucket_count() - 1; | 588 size_t largest_non_empty_bucket = bucket_count() - 1; |
599 while (0 == snapshot.counts(largest_non_empty_bucket)) { | 589 while (0 == snapshot->GetCountFromBucketIndex(largest_non_empty_bucket)) { |
600 if (0 == largest_non_empty_bucket) | 590 if (0 == largest_non_empty_bucket) |
601 break; // All buckets are empty. | 591 break; // All buckets are empty. |
602 --largest_non_empty_bucket; | 592 --largest_non_empty_bucket; |
603 } | 593 } |
604 | 594 |
605 // Calculate largest print width needed for any of our bucket range displays. | 595 // Calculate largest print width needed for any of our bucket range displays. |
606 size_t print_width = 1; | 596 size_t print_width = 1; |
607 for (size_t i = 0; i < bucket_count(); ++i) { | 597 for (size_t i = 0; i < bucket_count(); ++i) { |
608 if (snapshot.counts(i)) { | 598 if (snapshot->GetCountFromBucketIndex(i)) { |
609 size_t width = GetAsciiBucketRange(i).size() + 1; | 599 size_t width = GetAsciiBucketRange(i).size() + 1; |
610 if (width > print_width) | 600 if (width > print_width) |
611 print_width = width; | 601 print_width = width; |
612 } | 602 } |
613 } | 603 } |
614 | 604 |
615 int64 remaining = sample_count; | 605 int64 remaining = sample_count; |
616 int64 past = 0; | 606 int64 past = 0; |
617 // Output the actual histogram graph. | 607 // Output the actual histogram graph. |
618 for (size_t i = 0; i < bucket_count(); ++i) { | 608 for (size_t i = 0; i < bucket_count(); ++i) { |
619 Count current = snapshot.counts(i); | 609 Count current = snapshot->GetCountFromBucketIndex(i); |
620 if (!current && !PrintEmptyBucket(i)) | 610 if (!current && !PrintEmptyBucket(i)) |
621 continue; | 611 continue; |
622 remaining -= current; | 612 remaining -= current; |
623 string range = GetAsciiBucketRange(i); | 613 string range = GetAsciiBucketRange(i); |
624 output->append(range); | 614 output->append(range); |
625 for (size_t j = 0; range.size() + j < print_width + 1; ++j) | 615 for (size_t j = 0; range.size() + j < print_width + 1; ++j) |
626 output->push_back(' '); | 616 output->push_back(' '); |
627 if (0 == current && i < bucket_count() - 1 && 0 == snapshot.counts(i + 1)) { | 617 if (0 == current && i < bucket_count() - 1 && |
628 while (i < bucket_count() - 1 && 0 == snapshot.counts(i + 1)) | 618 0 == snapshot->GetCountFromBucketIndex(i + 1)) { |
619 while (i < bucket_count() - 1 && | |
620 0 == snapshot->GetCountFromBucketIndex(i + 1)) { | |
629 ++i; | 621 ++i; |
622 } | |
630 output->append("... "); | 623 output->append("... "); |
631 output->append(newline); | 624 output->append(newline); |
632 continue; // No reason to plot emptiness. | 625 continue; // No reason to plot emptiness. |
633 } | 626 } |
634 double current_size = GetBucketSize(current, i); | 627 double current_size = GetBucketSize(current, i); |
635 if (graph_it) | 628 if (graph_it) |
636 WriteAsciiBucketGraph(current_size, max_size, output); | 629 WriteAsciiBucketGraph(current_size, max_size, output); |
637 WriteAsciiBucketContext(past, current, remaining, i, output); | 630 WriteAsciiBucketContext(past, current, remaining, i, output); |
638 output->append(newline); | 631 output->append(newline); |
639 past += current; | 632 past += current; |
640 } | 633 } |
641 DCHECK_EQ(sample_count, past); | 634 DCHECK_EQ(sample_count, past); |
642 } | 635 } |
643 | 636 |
644 double Histogram::GetPeakBucketSize(const SampleSet& snapshot) const { | 637 double Histogram::GetPeakBucketSize( |
638 const BucketHistogramSamples& samples) const { | |
645 double max = 0; | 639 double max = 0; |
646 for (size_t i = 0; i < bucket_count() ; ++i) { | 640 for (size_t i = 0; i < bucket_count() ; ++i) { |
647 double current_size = GetBucketSize(snapshot.counts(i), i); | 641 double current_size = GetBucketSize(samples.GetCountFromBucketIndex(i), i); |
648 if (current_size > max) | 642 if (current_size > max) |
649 max = current_size; | 643 max = current_size; |
650 } | 644 } |
651 return max; | 645 return max; |
652 } | 646 } |
653 | 647 |
654 void Histogram::WriteAsciiHeader(const SampleSet& snapshot, | 648 void Histogram::WriteAsciiHeader(const BucketHistogramSamples& samples, |
655 Count sample_count, | 649 Count sample_count, |
656 string* output) const { | 650 string* output) const { |
657 StringAppendF(output, | 651 StringAppendF(output, |
658 "Histogram: %s recorded %d samples", | 652 "Histogram: %s recorded %d samples", |
659 histogram_name().c_str(), | 653 histogram_name().c_str(), |
660 sample_count); | 654 sample_count); |
661 if (0 == sample_count) { | 655 if (0 == sample_count) { |
662 DCHECK_EQ(snapshot.sum(), 0); | 656 DCHECK_EQ(samples.sum(), 0); |
663 } else { | 657 } else { |
664 double average = static_cast<float>(snapshot.sum()) / sample_count; | 658 double average = static_cast<float>(samples.sum()) / sample_count; |
665 | 659 |
666 StringAppendF(output, ", average = %.1f", average); | 660 StringAppendF(output, ", average = %.1f", average); |
667 } | 661 } |
668 if (flags() & ~kHexRangePrintingFlag) | 662 if (flags() & ~kHexRangePrintingFlag) |
669 StringAppendF(output, " (flags = 0x%x)", flags() & ~kHexRangePrintingFlag); | 663 StringAppendF(output, " (flags = 0x%x)", flags() & ~kHexRangePrintingFlag); |
670 } | 664 } |
671 | 665 |
672 void Histogram::WriteAsciiBucketContext(const int64 past, | 666 void Histogram::WriteAsciiBucketContext(const int64 past, |
673 const Count current, | 667 const Count current, |
674 const int64 remaining, | 668 const int64 remaining, |
(...skipping 282 matching lines...) | |
957 | 951 |
958 BucketRanges* bucket_ranges = new BucketRanges(ranges.size()); | 952 BucketRanges* bucket_ranges = new BucketRanges(ranges.size()); |
959 for (size_t i = 0; i < ranges.size(); i++) { | 953 for (size_t i = 0; i < ranges.size(); i++) { |
960 bucket_ranges->set_range(i, ranges[i]); | 954 bucket_ranges->set_range(i, ranges[i]); |
961 } | 955 } |
962 bucket_ranges->ResetChecksum(); | 956 bucket_ranges->ResetChecksum(); |
963 return bucket_ranges; | 957 return bucket_ranges; |
964 } | 958 } |
965 | 959 |
966 } // namespace base | 960 } // namespace base |