Chromium Code Reviews

Unified Diff: base/metrics/histogram.cc

Issue 4349002: Revert 64687 - Try to detect internal corruption of histogram instances.... (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src/
Patch Set: Created 10 years, 1 month ago
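For context: the patch below reverts r64687, which added two integrity checks to base::Histogram, a range_checksum_ field holding the sum of the bucket minima in ranges_, and a redundant_count_ field in SampleSet mirroring the sum of the per-bucket counts. The following is a minimal stand-alone sketch of the range-checksum idea only, with simplified names; it is not the Chromium class itself.

#include <cstddef>
#include <vector>

// Sketch: a checksum over the bucket boundaries lets a histogram detect
// memory corruption of ranges_ after construction.
class RangeChecksumSketch {
 public:
  explicit RangeChecksumSketch(const std::vector<int>& ranges)
      : ranges_(ranges), range_checksum_(Calculate()) {}

  // Returns false if ranges_ no longer matches the checksum captured at
  // construction time (i.e. the object has been corrupted).
  bool HasValidRangeChecksum() const { return Calculate() == range_checksum_; }

 private:
  int Calculate() const {
    int checksum = 0;
    // Mirrors the reverted CalculateRangeChecksum(): sum every boundary
    // except the final (overflow) one.
    for (size_t i = 0; i + 1 < ranges_.size(); ++i)
      checksum += ranges_[i];
    return checksum;
  }

  std::vector<int> ranges_;  // bucket_count + 1 boundaries
  int range_checksum_;
};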
Index: base/metrics/histogram.cc
===================================================================
--- base/metrics/histogram.cc (revision 64845)
+++ base/metrics/histogram.cc (working copy)
@@ -61,7 +61,6 @@
bucket_count_(bucket_count),
flags_(kNoFlags),
ranges_(bucket_count + 1, 0),
- range_checksum_(0),
sample_() {
Initialize();
}
@@ -74,7 +73,6 @@
bucket_count_(bucket_count),
flags_(kNoFlags),
ranges_(bucket_count + 1, 0),
- range_checksum_(0),
sample_() {
Initialize();
}
@@ -88,7 +86,6 @@
// Just to make sure most derived class did this properly...
DCHECK(ValidateBucketRanges());
- DCHECK(HasValidRangeChecksum());
}
bool Histogram::PrintEmptyBucket(size_t index) const {
@@ -221,7 +218,7 @@
// We have to be careful that we don't pick a ratio between starting points in
// consecutive buckets that is sooo small, that the integer bounds are the same
// (effectively making one bucket get no values). We need to avoid:
-// ranges_[i] == ranges_[i + 1]
+// (ranges_[i] == ranges_[i + 1]
// To avoid that, we just do a fine-grained bucket width as far as we need to
// until we get a ratio that moves us along at least 2 units at a time. From
// that bucket onward we do use the exponential growth of buckets.
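The comment above describes how bucket boundaries are chosen: exponential growth, with a fallback to width-1 steps whenever rounding would make two consecutive boundaries equal. Below is a simplified, stand-alone sketch of that rule; MakeExponentialRanges is a hypothetical helper that mirrors the logic visible in this hunk rather than the exact InitializeBucketRange code, and the handling of the top boundary is an assumption of the sketch.

#include <cmath>
#include <cstddef>
#include <limits>
#include <vector>

// Sketch: spread the remaining buckets exponentially between the current
// boundary and declared_max, but never let a boundary repeat; if the
// exponential step rounds to zero, advance by one unit instead.
std::vector<int> MakeExponentialRanges(int declared_min, int declared_max,
                                       size_t bucket_count) {
  std::vector<int> ranges(bucket_count + 1, 0);  // ranges[0] stays 0
  size_t bucket_index = 1;
  int current = declared_min;  // assumed >= 1 so log() is well defined
  ranges[bucket_index] = current;
  double log_max = std::log(static_cast<double>(declared_max));
  while (bucket_count > ++bucket_index) {
    double log_current = std::log(static_cast<double>(current));
    double log_next =
        log_current + (log_max - log_current) / (bucket_count - bucket_index);
    int next = static_cast<int>(std::floor(std::exp(log_next) + 0.5));
    if (next > current)
      current = next;
    else
      ++current;  // Too fine-grained: just do a narrow bucket and keep trying.
    ranges[bucket_index] = current;
  }
  // Assumption in this sketch: the last boundary acts as an overflow bucket.
  ranges[bucket_count] = std::numeric_limits<int>::max();
  return ranges;
}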
@@ -247,7 +244,6 @@
++current; // Just do a narrow bucket, and keep trying.
SetBucketRange(bucket_index, current);
}
- ResetRangeChecksum();
DCHECK_EQ(bucket_count(), bucket_index);
}
@@ -291,23 +287,6 @@
return current/denominator;
}
-void Histogram::ResetRangeChecksum() {
- range_checksum_ = CalculateRangeChecksum();
-}
-
-bool Histogram::HasValidRangeChecksum() const {
- return CalculateRangeChecksum() == range_checksum_;
-}
-
-Histogram::Sample Histogram::CalculateRangeChecksum() const {
- DCHECK_EQ(ranges_.size(), bucket_count() + 1);
- Sample checksum = 0;
- for (size_t index = 0; index < bucket_count(); ++index) {
- checksum += ranges(index);
- }
- return checksum;
-}
-
//------------------------------------------------------------------------------
// The following two methods can be overridden to provide a thread safe
// version of this class. The cost of locking is low... but an error in each
@@ -438,7 +417,6 @@
pickle.WriteInt(histogram.declared_min());
pickle.WriteInt(histogram.declared_max());
pickle.WriteSize(histogram.bucket_count());
- pickle.WriteInt(histogram.range_checksum());
pickle.WriteInt(histogram.histogram_type());
pickle.WriteInt(histogram.flags());
@@ -454,21 +432,19 @@
Pickle pickle(histogram_info.data(),
static_cast<int>(histogram_info.size()));
- std::string histogram_name;
+ void* iter = NULL;
+ size_t bucket_count;
int declared_min;
int declared_max;
- size_t bucket_count;
- int range_checksum;
int histogram_type;
int pickle_flags;
+ std::string histogram_name;
SampleSet sample;
- void* iter = NULL;
if (!pickle.ReadString(&iter, &histogram_name) ||
!pickle.ReadInt(&iter, &declared_min) ||
!pickle.ReadInt(&iter, &declared_max) ||
!pickle.ReadSize(&iter, &bucket_count) ||
- !pickle.ReadInt(&iter, &range_checksum) ||
!pickle.ReadInt(&iter, &histogram_type) ||
!pickle.ReadInt(&iter, &pickle_flags) ||
!sample.Histogram::SampleSet::Deserialize(&iter, pickle)) {
@@ -507,7 +483,6 @@
DCHECK_EQ(render_histogram->declared_min(), declared_min);
DCHECK_EQ(render_histogram->declared_max(), declared_max);
DCHECK_EQ(render_histogram->bucket_count(), bucket_count);
- DCHECK_EQ(render_histogram->range_checksum(), range_checksum);
DCHECK_EQ(render_histogram->histogram_type(), histogram_type);
if (render_histogram->flags() & kIPCSerializationSourceFlag) {
@@ -522,64 +497,13 @@
}
//------------------------------------------------------------------------------
-// Methods for the validating a sample and a related histogram.
-//------------------------------------------------------------------------------
-
-Histogram::Inconsistencies Histogram::FindCorruption(
- const SampleSet& snapshot) const {
- int inconsistencies = NO_INCONSISTENCIES;
- Sample previous_range = -1; // Bottom range is always 0.
- Sample checksum = 0;
- int64 count = 0;
- for (size_t index = 0; index < bucket_count(); ++index) {
- count += snapshot.counts(index);
- int new_range = ranges(index);
- checksum += new_range;
- if (previous_range >= new_range)
- inconsistencies |= BUCKET_ORDER_ERROR;
- previous_range = new_range;
- }
-
- if (checksum != range_checksum_)
- inconsistencies |= RANGE_CHECKSUM_ERROR;
-
- int64 delta64 = snapshot.redundant_count() - count;
- if (delta64 != 0) {
- int delta = static_cast<int>(delta64);
- if (delta != delta64)
- delta = INT_MAX; // Flag all giant errors as INT_MAX.
- // Since snapshots of histograms are taken asynchronously relative to
- // sampling (and snapped from different threads), it is pretty likely that
- // we'll catch a redundant count that doesn't match the sample count. We
- // allow for a certain amount of slop before flagging this as an
- // inconsistency. Even with an inconsistency, we'll snapshot it again (for
- // UMA in about a half hour, so we'll eventually get the data, if it was
- // not the result of a corruption. If histograms show that 1 is "too tight"
- // then we may try to use 2 or 3 for this slop value.
- const int kCommonRaceBasedCountMismatch = 1;
- if (delta > 0) {
- UMA_HISTOGRAM_COUNTS("Histogram.InconsistentCountHigh", delta);
- if (delta > kCommonRaceBasedCountMismatch)
- inconsistencies |= COUNT_HIGH_ERROR;
- } else {
- DCHECK_GT(0, delta);
- UMA_HISTOGRAM_COUNTS("Histogram.InconsistentCountLow", -delta);
- if (-delta > kCommonRaceBasedCountMismatch)
- inconsistencies |= COUNT_LOW_ERROR;
- }
- }
- return static_cast<Inconsistencies>(inconsistencies);
-}
-
-//------------------------------------------------------------------------------
// Methods for the Histogram::SampleSet class
//------------------------------------------------------------------------------
Histogram::SampleSet::SampleSet()
: counts_(),
sum_(0),
- square_sum_(0),
- redundant_count_(0) {
+ square_sum_(0) {
}
Histogram::SampleSet::~SampleSet() {
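The FindCorruption() method removed above tolerates a count mismatch of 1 because snapshots race with sampling on other threads. A stripped-down sketch of just that slop check follows; the flag values here are placeholders, not the real Inconsistencies constants.

#include <climits>
#include <cstdint>

// Placeholder flag values for illustration only.
enum {
  COUNT_HIGH_ERROR = 1 << 0,
  COUNT_LOW_ERROR = 1 << 1,
};

// Compare the independently tracked redundant count against the sum of the
// per-bucket counts, allowing a slop of 1 for benign snapshot races.
int CheckRedundantCount(int64_t redundant_count, int64_t summed_count) {
  int inconsistencies = 0;
  int64_t delta64 = redundant_count - summed_count;
  if (delta64 == 0)
    return inconsistencies;
  int delta = static_cast<int>(delta64);
  if (delta != delta64)
    delta = INT_MAX;  // Flag all giant errors as INT_MAX.
  const int kCommonRaceBasedCountMismatch = 1;
  if (delta > kCommonRaceBasedCountMismatch)
    inconsistencies |= COUNT_HIGH_ERROR;
  else if (-delta > kCommonRaceBasedCountMismatch)
    inconsistencies |= COUNT_LOW_ERROR;
  return inconsistencies;
}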
@@ -600,11 +524,9 @@
counts_[index] += count;
sum_ += count * value;
square_sum_ += (count * value) * static_cast<int64>(value);
- redundant_count_ += count;
DCHECK_GE(counts_[index], 0);
DCHECK_GE(sum_, 0);
DCHECK_GE(square_sum_, 0);
- DCHECK_GE(redundant_count_, 0);
}
Count Histogram::SampleSet::TotalCount() const {
@@ -614,7 +536,6 @@
++it) {
total += *it;
}
- DCHECK_EQ(total, redundant_count_);
return total;
}
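For reference, the redundant_count_ bookkeeping being stripped from SampleSet above can be sketched in isolation as follows; this is a simplified stand-in class, not the real Histogram::SampleSet.

#include <cstddef>
#include <cstdint>
#include <vector>

// Simplified stand-in: every Accumulate() bumps redundant_count_ by the same
// amount as the bucket counters, so the two totals should always agree unless
// the object has been corrupted (or a snapshot raced with sampling).
class SampleSetSketch {
 public:
  explicit SampleSetSketch(size_t bucket_count) : counts_(bucket_count, 0) {}

  void Accumulate(int value, int count, size_t index) {
    counts_[index] += count;
    sum_ += static_cast<int64_t>(count) * value;
    square_sum_ += (static_cast<int64_t>(count) * value) * value;
    redundant_count_ += count;
  }

  int64_t TotalCount() const {
    int64_t total = 0;
    for (size_t i = 0; i < counts_.size(); ++i)
      total += counts_[i];
    return total;  // The reverted code DCHECKed total == redundant_count_.
  }

  int64_t redundant_count() const { return redundant_count_; }

 private:
  std::vector<int> counts_;
  int64_t sum_ = 0;
  int64_t square_sum_ = 0;
  int64_t redundant_count_ = 0;
};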
@@ -622,7 +543,6 @@
DCHECK_EQ(counts_.size(), other.counts_.size());
sum_ += other.sum_;
square_sum_ += other.square_sum_;
- redundant_count_ += other.redundant_count_;
for (size_t index = 0; index < counts_.size(); ++index)
counts_[index] += other.counts_[index];
}
@@ -634,7 +554,6 @@
// calculated). As a result, we don't currently CHCEK() for positive values.
sum_ -= other.sum_;
square_sum_ -= other.square_sum_;
- redundant_count_ -= other.redundant_count_;
for (size_t index = 0; index < counts_.size(); ++index) {
counts_[index] -= other.counts_[index];
DCHECK_GE(counts_[index], 0);
@@ -644,7 +563,6 @@
bool Histogram::SampleSet::Serialize(Pickle* pickle) const {
pickle->WriteInt64(sum_);
pickle->WriteInt64(square_sum_);
- pickle->WriteInt64(redundant_count_);
pickle->WriteSize(counts_.size());
for (size_t index = 0; index < counts_.size(); ++index) {
@@ -658,13 +576,11 @@
DCHECK_EQ(counts_.size(), 0u);
DCHECK_EQ(sum_, 0);
DCHECK_EQ(square_sum_, 0);
- DCHECK_EQ(redundant_count_, 0);
size_t counts_size;
if (!pickle.ReadInt64(iter, &sum_) ||
!pickle.ReadInt64(iter, &square_sum_) ||
- !pickle.ReadInt64(iter, &redundant_count_) ||
!pickle.ReadSize(iter, &counts_size)) {
return false;
}
@@ -672,16 +588,14 @@
if (counts_size == 0)
return false;
- int count = 0;
for (size_t index = 0; index < counts_size; ++index) {
int i;
if (!pickle.ReadInt(iter, &i))
return false;
counts_.push_back(i);
- count += i;
}
- DCHECK_EQ(count, redundant_count_);
- return count == redundant_count_;
+
+ return true;
}
//------------------------------------------------------------------------------
@@ -780,7 +694,6 @@
(bucket_count() - 2);
SetBucketRange(i, static_cast<int> (linear_range + 0.5));
}
- ResetRangeChecksum();
}
double LinearHistogram::GetBucketSize(Count current, size_t i) const {
@@ -827,7 +740,7 @@
scoped_refptr<Histogram> CustomHistogram::FactoryGet(
const std::string& name,
- const std::vector<Sample>& custom_ranges,
+ const std::vector<int>& custom_ranges,
Flags flags) {
scoped_refptr<Histogram> histogram(NULL);
@@ -861,7 +774,7 @@
}
CustomHistogram::CustomHistogram(const std::string& name,
- const std::vector<Sample>& custom_ranges)
+ const std::vector<int>& custom_ranges)
: Histogram(name, custom_ranges[1], custom_ranges.back(),
custom_ranges.size()) {
DCHECK_GT(custom_ranges.size(), 1u);
@@ -876,7 +789,6 @@
DCHECK_LE(ranges_vector_->size(), bucket_count());
for (size_t index = 0; index < ranges_vector_->size(); ++index)
SetBucketRange(index, (*ranges_vector_)[index]);
- ResetRangeChecksum();
}
double CustomHistogram::GetBucketSize(Count current, size_t i) const {