Index: base/metrics/histogram_persistence.cc
diff --git a/base/metrics/histogram_persistence.cc b/base/metrics/histogram_persistence.cc
index 7c60079ef9ced823ea06d16470e5ff14780ed7a4..19ca3a2fe9bd6f4dabf1b4e64452c3e058265ba4 100644
--- a/base/metrics/histogram_persistence.cc
+++ b/base/metrics/histogram_persistence.cc
@@ -73,6 +73,7 @@ struct PersistentHistogramData {
   uint32_t ranges_checksum;
   PersistentMemoryAllocator::Reference counts_ref;
   HistogramSamples::Metadata samples_metadata;
+  HistogramSamples::Metadata logged_metadata;
 
   // Space for the histogram name will be added during the actual allocation
   // request. This must be the last field of the structure. A zero-size array
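The struct comment above refers to the classic trailing-array trick: keep a minimal name member as the very last field and simply request a larger allocation so the full string fits after the fixed fields. A minimal standalone sketch of that idiom follows; the struct, field names, and AllocateRecord() are hypothetical stand-ins, not the Chromium definitions.

#include <cstdio>
#include <cstdlib>
#include <cstring>

// Hypothetical stand-in for a persistent record: fixed fields first, then a
// minimal name array that must remain the last field so the extra bytes
// appended to the allocation become storage for the full name string.
struct RecordWithName {
  int minimum;
  int maximum;
  unsigned bucket_count;
  char name[1];  // grows into the extra space requested at allocation time
};

RecordWithName* AllocateRecord(const char* name) {
  // Request the struct plus the name; name[1] already covers the terminator.
  std::size_t size = sizeof(RecordWithName) + std::strlen(name);
  RecordWithName* record = static_cast<RecordWithName*>(std::calloc(1, size));
  if (record)
    std::strcpy(record->name, name);
  return record;
}

int main() {
  RecordWithName* record = AllocateRecord("Example.PersistentHistogram");
  if (record)
    std::printf("%s\n", record->name);
  std::free(record);
}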
@@ -108,6 +109,22 @@ BucketRanges* CreateRangesFromData(HistogramBase::Sample* ranges_data,
   return ranges.release();
 }
 
+// Calculate the number of bytes required to store all of a histogram's
+// "counts".
Alexei Svitkine (slow)
2016/02/17 16:21:12
Add a sentence about why this returns 0. Maybe jus
bcwhite
2016/02/17 17:58:20
Done.
|
+uint32_t RequiredCountsBytes(size_t bucket_count) {
Alexei Svitkine (slow)
2016/02/17 16:21:12
Nit: Get* or Calculate*
bcwhite
2016/02/17 17:58:20
Done.
|
+  // 2 because each "sample count" also requires a backup "logged count"
+  // used for calculating the delta during snapshot operations.
+  const unsigned kBytesPerBucket = 2 * sizeof(HistogramBase::AtomicCount);
+
+  // If the |bucket_count| is such that it would overflow the return type,
+  // perhaps as the result of a milicious actor, then return zero to
Alexei Svitkine (slow)
2016/02/17 16:21:12
Nit: malicious
bcwhite
2016/02/17 17:58:20
Done.
|
+  // indicate the problem to the caller.
+  if (bucket_count > std::numeric_limits<uint32_t>::max() / kBytesPerBucket)
+    return 0;
+
+  return static_cast<uint32_t>(bucket_count * kBytesPerBucket);
+}
+
 }  // namespace
 
 const Feature kPersistentHistogramsFeature{
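The guard inside RequiredCountsBytes() is the standard pre-multiplication overflow check: reject any bucket_count larger than max / multiplier before doing the multiply, and signal failure with a zero result. Below is a self-contained sketch of the same pattern and how a caller would treat the zero return; the AtomicCount alias is an illustrative stand-in, not the Chromium type.

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <limits>

// Illustrative stand-in for HistogramBase::AtomicCount.
using AtomicCount = int32_t;

// Two counters per bucket (the live count plus its logged copy); return 0 if
// the multiplication would overflow the 32-bit result.
uint32_t RequiredCountsBytes(std::size_t bucket_count) {
  const uint32_t kBytesPerBucket = 2 * sizeof(AtomicCount);
  if (bucket_count > std::numeric_limits<uint32_t>::max() / kBytesPerBucket)
    return 0;
  return static_cast<uint32_t>(bucket_count * kBytesPerBucket);
}

int main() {
  std::printf("%u\n", RequiredCountsBytes(50));        // 400: 50 buckets * 8 bytes
  std::printf("%u\n", RequiredCountsBytes(SIZE_MAX));  // 0: would overflow, caller must bail out
}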
@@ -220,14 +237,20 @@ HistogramBase* CreatePersistentHistogram(
   HistogramBase::AtomicCount* counts_data =
       allocator->GetAsObject<HistogramBase::AtomicCount>(
          histogram_data.counts_ref, kTypeIdCountsArray);
-  if (!counts_data ||
-      allocator->GetAllocSize(histogram_data.counts_ref) <
-          histogram_data.bucket_count * sizeof(HistogramBase::AtomicCount)) {
+  size_t counts_bytes = RequiredCountsBytes(histogram_data.bucket_count);
+  if (!counts_data || !counts_bytes ||
+      allocator->GetAllocSize(histogram_data.counts_ref) < counts_bytes) {
     RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_COUNTS_ARRAY);
     NOTREACHED();
     return nullptr;
   }
 
+  // After the main "counts" array is a second array used for storing what
+  // was previously logged. This is used to calculate the "delta" during
+  // snapshot operations.
+  HistogramBase::AtomicCount* logged_data =
+      counts_data + histogram_data.bucket_count;
+
   std::string name(histogram_data_ptr->name);
   HistogramBase* histogram = nullptr;
   switch (histogram_data.histogram_type) {
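The arithmetic above depends on the counts allocation holding twice bucket_count entries: the first half is the live counts array and the second half, reached by the counts_data + bucket_count offset, is the logged array. A small standalone sketch of that split and the size validation; the types are stand-ins, not the Chromium allocator API.

#include <atomic>
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Stand-in for HistogramBase::AtomicCount.
using AtomicCount = std::atomic<int32_t>;

int main() {
  constexpr std::size_t kBucketCount = 4;

  // One contiguous block sized for both halves, like the allocation that
  // counts_ref points at: two counters per bucket.
  static AtomicCount block[2 * kBucketCount] = {};

  // First half: live sample counts. Second half: counts as of the last
  // snapshot, used later to compute deltas.
  AtomicCount* counts_data = block;
  AtomicCount* logged_data = counts_data + kBucketCount;

  counts_data[2].fetch_add(7, std::memory_order_relaxed);
  std::printf("counts[2]=%d logged[2]=%d\n",
              static_cast<int>(counts_data[2].load(std::memory_order_relaxed)),
              static_cast<int>(logged_data[2].load(std::memory_order_relaxed)));
}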
@@ -238,8 +261,10 @@ HistogramBase* CreatePersistentHistogram(
           histogram_data.maximum,
           ranges,
           counts_data,
+          logged_data,
           histogram_data.bucket_count,
-          &histogram_data_ptr->samples_metadata);
+          &histogram_data_ptr->samples_metadata,
+          &histogram_data_ptr->logged_metadata);
       DCHECK(histogram);
       break;
     case LINEAR_HISTOGRAM:
@@ -249,8 +274,10 @@ HistogramBase* CreatePersistentHistogram(
           histogram_data.maximum,
           ranges,
           counts_data,
+          logged_data,
           histogram_data.bucket_count,
-          &histogram_data_ptr->samples_metadata);
+          &histogram_data_ptr->samples_metadata,
+          &histogram_data_ptr->logged_metadata);
       DCHECK(histogram);
       break;
     case BOOLEAN_HISTOGRAM:
@@ -258,7 +285,9 @@ HistogramBase* CreatePersistentHistogram(
           name,
           ranges,
           counts_data,
-          &histogram_data_ptr->samples_metadata);
+          logged_data,
+          &histogram_data_ptr->samples_metadata,
+          &histogram_data_ptr->logged_metadata);
       DCHECK(histogram);
       break;
     case CUSTOM_HISTOGRAM:
@@ -266,8 +295,10 @@ HistogramBase* CreatePersistentHistogram(
           name,
           ranges,
           counts_data,
+          logged_data,
           histogram_data.bucket_count,
-          &histogram_data_ptr->samples_metadata);
+          &histogram_data_ptr->samples_metadata,
+          &histogram_data_ptr->logged_metadata);
       DCHECK(histogram);
       break;
     default:
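Every constructor in the switch now receives both arrays and both metadata blocks because, per the comments above, the logged copy exists to compute the "delta" during snapshot operations: report only what changed since the previous snapshot, then remember the reported totals. The following is only a sketch of that delta idea with stand-in types; it is not the actual Chromium HistogramSamples snapshot code.

#include <atomic>
#include <cstddef>
#include <cstdint>
#include <cstdio>

using AtomicCount = std::atomic<int32_t>;  // stand-in for HistogramBase::AtomicCount

// Report only what changed since the previous snapshot, then remember the
// reported totals so the next snapshot starts from them.
void SnapshotDelta(AtomicCount* counts, AtomicCount* logged,
                   std::size_t bucket_count) {
  for (std::size_t i = 0; i < bucket_count; ++i) {
    int32_t current = counts[i].load(std::memory_order_relaxed);
    int32_t delta = current - logged[i].load(std::memory_order_relaxed);
    if (delta != 0) {
      std::printf("bucket %zu: +%d\n", i, delta);
      logged[i].store(current, std::memory_order_relaxed);
    }
  }
}

int main() {
  constexpr std::size_t kBuckets = 3;
  static AtomicCount counts[kBuckets] = {};
  static AtomicCount logged[kBuckets] = {};

  counts[1].fetch_add(5, std::memory_order_relaxed);
  SnapshotDelta(counts, logged, kBuckets);  // prints "bucket 1: +5"

  counts[1].fetch_add(2, std::memory_order_relaxed);
  SnapshotDelta(counts, logged, kBuckets);  // prints "bucket 1: +2"
}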
@@ -342,16 +373,14 @@ HistogramBase* AllocatePersistentHistogram(
   if (!allocator)
     return nullptr;
 
+  // If RequiredCountsBytes() returns zero then the bucket_count was not valid.
   size_t bucket_count = bucket_ranges->bucket_count();
-  // An overflow such as this, perhaps as the result of a milicious actor,
-  // could lead to writing beyond the allocation boundary and into other
-  // memory. Just fail the allocation and let the caller deal with it.
-  if (bucket_count > std::numeric_limits<int32_t>::max() /
-                     sizeof(HistogramBase::AtomicCount)) {
+  size_t counts_bytes = RequiredCountsBytes(bucket_count);
Alexei Svitkine (slow)
2016/02/17 16:21:12
Make the function return size_t instead of uint32_t.
bcwhite
2016/02/17 17:58:20
Done.
|
+  if (!counts_bytes) {
     NOTREACHED();
     return nullptr;
   }
-  size_t counts_bytes = bucket_count * sizeof(HistogramBase::AtomicCount);
+
   size_t ranges_bytes = (bucket_count + 1) * sizeof(HistogramBase::Sample);
   PersistentMemoryAllocator::Reference ranges_ref =
       allocator->Allocate(ranges_bytes, kTypeIdRangesArray);
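As a concrete check of the sizing arithmetic in this last hunk, assuming 4-byte Sample and AtomicCount types (illustrative stand-ins, not the Chromium definitions): 50 buckets need 51 boundary values for the ranges array and 100 counters for the combined counts-plus-logged array.

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Illustrative stand-ins for HistogramBase::Sample and HistogramBase::AtomicCount.
using Sample = int32_t;
using AtomicCount = int32_t;

int main() {
  const std::size_t bucket_count = 50;

  // N buckets have N + 1 boundaries, so the ranges array gets one extra slot.
  std::size_t ranges_bytes = (bucket_count + 1) * sizeof(Sample);

  // Two counters per bucket: the live count plus its logged copy.
  std::size_t counts_bytes = 2 * bucket_count * sizeof(AtomicCount);

  std::printf("ranges_bytes=%zu counts_bytes=%zu\n",
              ranges_bytes, counts_bytes);  // 204 and 400
}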