| OLD | NEW |
| 1 // Copyright 2016 The Chromium Authors. All rights reserved. | 1 // Copyright 2016 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "base/metrics/persistent_histogram_allocator.h" | 5 #include "base/metrics/persistent_histogram_allocator.h" |
| 6 | 6 |
| 7 #include "base/lazy_instance.h" | 7 #include "base/lazy_instance.h" |
| 8 #include "base/logging.h" | 8 #include "base/logging.h" |
| 9 #include "base/memory/scoped_ptr.h" | 9 #include "base/memory/scoped_ptr.h" |
| 10 #include "base/metrics/histogram.h" | 10 #include "base/metrics/histogram.h" |
| 11 #include "base/metrics/histogram_base.h" | 11 #include "base/metrics/histogram_base.h" |
| 12 #include "base/metrics/histogram_samples.h" | 12 #include "base/metrics/histogram_samples.h" |
| 13 #include "base/metrics/sparse_histogram.h" |
| 13 #include "base/metrics/statistics_recorder.h" | 14 #include "base/metrics/statistics_recorder.h" |
| 14 #include "base/synchronization/lock.h" | 15 #include "base/synchronization/lock.h" |
| 15 | 16 |
| 16 // TODO(bcwhite): Order these methods to match the header file. The current | 17 // TODO(bcwhite): Order these methods to match the header file. The current |
| 17 // order is only temporary in order to aid review of the transition from | 18 // order is only temporary in order to aid review of the transition from |
| 18 // a non-class implementation. | 19 // a non-class implementation. |
| 19 | 20 |
| 20 namespace base { | 21 namespace base { |
| 21 | 22 |
| 22 namespace { | 23 namespace { |
| (...skipping 41 matching lines...) |
| 64 return nullptr; | 65 return nullptr; |
| 65 | 66 |
| 66 return ranges; | 67 return ranges; |
| 67 } | 68 } |
| 68 | 69 |
| 69 // Calculate the number of bytes required to store all of a histogram's | 70 // Calculate the number of bytes required to store all of a histogram's |
| 70 // "counts". This will return zero (0) if |bucket_count| is not valid. | 71 // "counts". This will return zero (0) if |bucket_count| is not valid. |
| 71 size_t CalculateRequiredCountsBytes(size_t bucket_count) { | 72 size_t CalculateRequiredCountsBytes(size_t bucket_count) { |
| 72 // 2 because each "sample count" also requires a backup "logged count" | 73 // 2 because each "sample count" also requires a backup "logged count" |
| 73 // used for calculating the delta during snapshot operations. | 74 // used for calculating the delta during snapshot operations. |
| 74 const unsigned kBytesPerBucket = 2 * sizeof(HistogramBase::AtomicCount); | 75 const size_t kBytesPerBucket = 2 * sizeof(HistogramBase::AtomicCount); |
| 75 | 76 |
| 76 // If the |bucket_count| is such that it would overflow the return type, | 77 // If the |bucket_count| is such that it would overflow the return type, |
| 77 // perhaps as the result of a malicious actor, then return zero to | 78 // perhaps as the result of a malicious actor, then return zero to |
| 78 // indicate the problem to the caller. | 79 // indicate the problem to the caller. |
| 79 if (bucket_count > std::numeric_limits<uint32_t>::max() / kBytesPerBucket) | 80 if (bucket_count > std::numeric_limits<size_t>::max() / kBytesPerBucket) |
| 80 return 0; | 81 return 0; |
| 81 | 82 |
| 82 return bucket_count * kBytesPerBucket; | 83 return bucket_count * kBytesPerBucket; |
| 83 } | 84 } |
| 84 | 85 |
| 85 } // namespace | 86 } // namespace |
| 86 | 87 |
| 87 const Feature kPersistentHistogramsFeature{ | 88 const Feature kPersistentHistogramsFeature{ |
| 88 "PersistentHistograms", FEATURE_DISABLED_BY_DEFAULT | 89 "PersistentHistograms", FEATURE_DISABLED_BY_DEFAULT |
| 89 }; | 90 }; |
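The overflow guard in CalculateRequiredCountsBytes() above compares |bucket_count| against the type's maximum divided by the per-bucket cost before multiplying; switching both the constant and the limit to size_t (new lines 75 and 80) keeps the comparison and the multiplication in the same, widest type. A minimal standalone sketch of the same guard pattern, with int32_t standing in for HistogramBase::AtomicCount:

    // Sketch only: int32_t stands in for HistogramBase::AtomicCount.
    #include <cstddef>
    #include <cstdint>
    #include <limits>

    size_t RequiredCountsBytes(size_t bucket_count) {
      const size_t kBytesPerBucket = 2 * sizeof(int32_t);  // sample + logged count
      // Check against max()/kBytesPerBucket *before* multiplying; a wrapped
      // product would silently under-size the counts allocation.
      if (bucket_count > std::numeric_limits<size_t>::max() / kBytesPerBucket)
        return 0;  // caller treats 0 as "invalid bucket count"
      return bucket_count * kBytesPerBucket;
    }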
| (...skipping 179 matching lines...) |
| 269 | 270 |
| 270 // static | 271 // static |
| 271 scoped_ptr<HistogramBase> PersistentHistogramAllocator::CreateHistogram( | 272 scoped_ptr<HistogramBase> PersistentHistogramAllocator::CreateHistogram( |
| 272 PersistentHistogramData* histogram_data_ptr) { | 273 PersistentHistogramData* histogram_data_ptr) { |
| 273 if (!histogram_data_ptr) { | 274 if (!histogram_data_ptr) { |
| 274 RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_METADATA_POINTER); | 275 RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_METADATA_POINTER); |
| 275 NOTREACHED(); | 276 NOTREACHED(); |
| 276 return nullptr; | 277 return nullptr; |
| 277 } | 278 } |
| 278 | 279 |
| 280 // Sparse histograms are quite different so handle them as a special case. |
| 281 if (histogram_data_ptr->histogram_type == SPARSE_HISTOGRAM) { |
| 282 scoped_ptr<HistogramBase> histogram = SparseHistogram::PersistentCreate( |
| 283 memory_allocator(), histogram_data_ptr->name, |
| 284 &histogram_data_ptr->samples_metadata, |
| 285 &histogram_data_ptr->logged_metadata); |
| 286 DCHECK(histogram); |
| 287 histogram->SetFlags(histogram_data_ptr->flags); |
| 288 RecordCreateHistogramResult(CREATE_HISTOGRAM_SUCCESS); |
| 289 return histogram; |
| 290 } |
| 291 |
| 279 // Copy the histogram_data to local storage because anything in persistent | 292 // Copy the histogram_data to local storage because anything in persistent |
| 280 // memory cannot be trusted as it could be changed at any moment by a | 293 // memory cannot be trusted as it could be changed at any moment by a |
| 281 // malicious actor that shares access. The contents of histogram_data are | 294 // malicious actor that shares access. The contents of histogram_data are |
| 282 // validated below; the local copy is to ensure that the contents cannot | 295 // validated below; the local copy is to ensure that the contents cannot |
| 283 // be externally changed between validation and use. | 296 // be externally changed between validation and use. |
| 284 PersistentHistogramData histogram_data = *histogram_data_ptr; | 297 PersistentHistogramData histogram_data = *histogram_data_ptr; |
| 285 | 298 |
| 286 HistogramBase::Sample* ranges_data = | 299 HistogramBase::Sample* ranges_data = |
| 287 memory_allocator_->GetAsObject<HistogramBase::Sample>( | 300 memory_allocator_->GetAsObject<HistogramBase::Sample>( |
| 288 histogram_data.ranges_ref, kTypeIdRangesArray); | 301 histogram_data.ranges_ref, kTypeIdRangesArray); |
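The local copy made above guards against time-of-check/time-of-use races: another process sharing the allocation could rewrite the record between validation and use. A toy illustration of the idea, with a hypothetical Record type in place of PersistentHistogramData:

    #include <cstdint>

    struct Record {              // hypothetical stand-in for the persistent struct
      uint32_t bucket_count;
    };

    // |shared| can be rewritten at any moment by another process mapping the
    // same memory, so snapshot it once and validate only the snapshot.
    uint32_t SafeBucketCount(const volatile Record* shared) {
      Record local;
      local.bucket_count = shared->bucket_count;  // single read of shared memory
      if (local.bucket_count == 0 || local.bucket_count > 10000)
        return 0;  // arbitrary plausibility bound for the sketch
      // Only |local| is used from here on; later changes to |shared| cannot
      // invalidate the check above.
      return local.bucket_count;
    }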
| (...skipping 22 matching lines...) |
| 311 } | 324 } |
| 312 const BucketRanges* ranges = | 325 const BucketRanges* ranges = |
| 313 StatisticsRecorder::RegisterOrDeleteDuplicateRanges( | 326 StatisticsRecorder::RegisterOrDeleteDuplicateRanges( |
| 314 created_ranges.release()); | 327 created_ranges.release()); |
| 315 | 328 |
| 316 HistogramBase::AtomicCount* counts_data = | 329 HistogramBase::AtomicCount* counts_data = |
| 317 memory_allocator_->GetAsObject<HistogramBase::AtomicCount>( | 330 memory_allocator_->GetAsObject<HistogramBase::AtomicCount>( |
| 318 histogram_data.counts_ref, kTypeIdCountsArray); | 331 histogram_data.counts_ref, kTypeIdCountsArray); |
| 319 size_t counts_bytes = | 332 size_t counts_bytes = |
| 320 CalculateRequiredCountsBytes(histogram_data.bucket_count); | 333 CalculateRequiredCountsBytes(histogram_data.bucket_count); |
| 321 if (!counts_data || !counts_bytes || | 334 if (!counts_data || counts_bytes == 0 || |
| 322 memory_allocator_->GetAllocSize(histogram_data.counts_ref) < | 335 memory_allocator_->GetAllocSize(histogram_data.counts_ref) < |
| 323 counts_bytes) { | 336 counts_bytes) { |
| 324 RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_COUNTS_ARRAY); | 337 RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_COUNTS_ARRAY); |
| 325 NOTREACHED(); | 338 NOTREACHED(); |
| 326 return nullptr; | 339 return nullptr; |
| 327 } | 340 } |
| 328 | 341 |
| 329 // After the main "counts" array is a second array used for storing what | 342 // After the main "counts" array is a second array used for storing what |
| 330 // was previously logged. This is used to calculate the "delta" during | 343 // was previously logged. This is used to calculate the "delta" during |
| 331 // snapshot operations. | 344 // snapshot operations. |
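The comment above describes the layout: the live counts array is followed by an equally sized "logged" array. A rough sketch (not the histogram classes' actual code) of how such a shadow array yields per-snapshot deltas:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct DeltaCounts {
      std::vector<int32_t> counts;  // live sample counts
      std::vector<int32_t> logged;  // already reported; same size as |counts|

      // Returns the per-bucket change since the previous snapshot and folds it
      // into |logged| so the next snapshot reports only new samples.
      std::vector<int32_t> SnapshotDelta() {
        std::vector<int32_t> delta(counts.size());
        for (size_t i = 0; i < counts.size(); ++i) {
          delta[i] = counts[i] - logged[i];
          logged[i] += delta[i];
        }
        return delta;
      }
    };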
| (...skipping 106 matching lines...) |
| 438 Reference* ref_ptr) { | 451 Reference* ref_ptr) { |
| 439 // If the allocator is corrupt, don't waste time trying anything else. | 452 // If the allocator is corrupt, don't waste time trying anything else. |
| 440 // This also allows differentiating on the dashboard between allocations | 453 // This also allows differentiating on the dashboard between allocations |
| 441 // failed due to a corrupt allocator and the number of process instances | 454 // failed due to a corrupt allocator and the number of process instances |
| 442 // with one, the latter being indicated by "newly corrupt", below. | 455 // with one, the latter being indicated by "newly corrupt", below. |
| 443 if (memory_allocator_->IsCorrupt()) { | 456 if (memory_allocator_->IsCorrupt()) { |
| 444 RecordCreateHistogramResult(CREATE_HISTOGRAM_ALLOCATOR_CORRUPT); | 457 RecordCreateHistogramResult(CREATE_HISTOGRAM_ALLOCATOR_CORRUPT); |
| 445 return nullptr; | 458 return nullptr; |
| 446 } | 459 } |
| 447 | 460 |
| 448 // If CalculateRequiredCountsBytes() returns zero then the bucket_count | 461 // Create the metadata necessary for a persistent sparse histogram. This |
| 449 // was not valid. | 462 // is done first because it is a small subset of what is required for |
| 450 size_t bucket_count = bucket_ranges->bucket_count(); | 463 // other histograms. |
| 451 size_t counts_bytes = CalculateRequiredCountsBytes(bucket_count); | |
| 452 if (!counts_bytes) { | |
| 453 NOTREACHED(); | |
| 454 return nullptr; | |
| 455 } | |
| 456 | |
| 457 size_t ranges_bytes = (bucket_count + 1) * sizeof(HistogramBase::Sample); | |
| 458 PersistentMemoryAllocator::Reference ranges_ref = | |
| 459 memory_allocator_->Allocate(ranges_bytes, kTypeIdRangesArray); | |
| 460 PersistentMemoryAllocator::Reference counts_ref = | |
| 461 memory_allocator_->Allocate(counts_bytes, kTypeIdCountsArray); | |
| 462 PersistentMemoryAllocator::Reference histogram_ref = | 464 PersistentMemoryAllocator::Reference histogram_ref = |
| 463 memory_allocator_->Allocate( | 465 memory_allocator_->Allocate( |
| 464 offsetof(PersistentHistogramData, name) + name.length() + 1, | 466 offsetof(PersistentHistogramData, name) + name.length() + 1, |
| 465 kTypeIdHistogram); | 467 kTypeIdHistogram); |
| 466 HistogramBase::Sample* ranges_data = | |
| 467 memory_allocator_->GetAsObject<HistogramBase::Sample>(ranges_ref, | |
| 468 kTypeIdRangesArray); | |
| 469 PersistentHistogramData* histogram_data = | 468 PersistentHistogramData* histogram_data = |
| 470 memory_allocator_->GetAsObject<PersistentHistogramData>(histogram_ref, | 469 memory_allocator_->GetAsObject<PersistentHistogramData>(histogram_ref, |
| 471 kTypeIdHistogram); | 470 kTypeIdHistogram); |
| 471 if (histogram_data) { |
| 472 memcpy(histogram_data->name, name.c_str(), name.size() + 1); |
| 473 histogram_data->histogram_type = histogram_type; |
| 474 histogram_data->flags = flags | HistogramBase::kIsPersistent; |
| 475 } |
| 472 | 476 |
| 473 // Only continue here if all allocations were successful. If they weren't, | 477 // Create the remaining metadata necessary for regular histograms. |
| 474 // there is no way to free the space but that's not really a problem since | 478 if (histogram_type != SPARSE_HISTOGRAM) { |
| 475 // the allocations only fail because the space is full or corrupt and so | 479 size_t bucket_count = bucket_ranges->bucket_count(); |
| 476 // any future attempts will also fail. | 480 size_t counts_bytes = CalculateRequiredCountsBytes(bucket_count); |
| 477 if (counts_ref && ranges_data && histogram_data) { | 481 if (counts_bytes == 0) { |
| 478 strcpy(histogram_data->name, name.c_str()); | 482 // |bucket_count| was out-of-range. |
| 479 for (size_t i = 0; i < bucket_ranges->size(); ++i) | 483 NOTREACHED(); |
| 480 ranges_data[i] = bucket_ranges->range(i); | 484 return nullptr; |
| 485 } |
| 481 | 486 |
| 482 histogram_data->histogram_type = histogram_type; | 487 size_t ranges_bytes = (bucket_count + 1) * sizeof(HistogramBase::Sample); |
| 483 histogram_data->flags = flags; | 488 PersistentMemoryAllocator::Reference counts_ref = |
| 484 histogram_data->minimum = minimum; | 489 memory_allocator_->Allocate(counts_bytes, kTypeIdCountsArray); |
| 485 histogram_data->maximum = maximum; | 490 PersistentMemoryAllocator::Reference ranges_ref = |
| 486 histogram_data->bucket_count = static_cast<uint32_t>(bucket_count); | 491 memory_allocator_->Allocate(ranges_bytes, kTypeIdRangesArray); |
| 487 histogram_data->ranges_ref = ranges_ref; | 492 HistogramBase::Sample* ranges_data = |
| 488 histogram_data->ranges_checksum = bucket_ranges->checksum(); | 493 memory_allocator_->GetAsObject<HistogramBase::Sample>( |
| 489 histogram_data->counts_ref = counts_ref; | 494 ranges_ref, kTypeIdRangesArray); |
| 490 | 495 |
| 496 // Only continue here if all allocations were successful. If they weren't, |
| 497 // there is no way to free the space but that's not really a problem since |
| 498 // the allocations only fail because the space is full or corrupt and so |
| 499 // any future attempts will also fail. |
| 500 if (counts_ref && ranges_data && histogram_data) { |
| 501 for (size_t i = 0; i < bucket_ranges->size(); ++i) |
| 502 ranges_data[i] = bucket_ranges->range(i); |
| 503 |
| 504 histogram_data->minimum = minimum; |
| 505 histogram_data->maximum = maximum; |
| 506 // |bucket_count| must fit within 32-bits or the allocation of the counts |
| 507 // array would have failed for being too large; the allocator supports |
| 508 // less than 4GB total size. |
| 509 histogram_data->bucket_count = static_cast<uint32_t>(bucket_count); |
| 510 histogram_data->ranges_ref = ranges_ref; |
| 511 histogram_data->ranges_checksum = bucket_ranges->checksum(); |
| 512 histogram_data->counts_ref = counts_ref; |
| 513 } else { |
| 514 histogram_data = nullptr; // Clear this for proper handling below. |
| 515 } |
| 516 } |
| 517 |
| 518 if (histogram_data) { |
| 491 // Create the histogram using resources in persistent memory. This ends up | 519 // Create the histogram using resources in persistent memory. This ends up |
| 492 // resolving the "ref" values stored in histogram_data instead of just | 520 // resolving the "ref" values stored in histogram_data instead of just |
| 493 // using what is already known above but avoids duplicating the switch | 521 // using what is already known above but avoids duplicating the switch |
| 494 // statement here and serves as a double-check that everything is | 522 // statement here and serves as a double-check that everything is |
| 495 // correct before committing the new histogram to persistent space. | 523 // correct before committing the new histogram to persistent space. |
| 496 scoped_ptr<HistogramBase> histogram = CreateHistogram(histogram_data); | 524 scoped_ptr<HistogramBase> histogram = CreateHistogram(histogram_data); |
| 497 DCHECK(histogram); | 525 DCHECK(histogram); |
| 498 if (ref_ptr != nullptr) | 526 if (ref_ptr != nullptr) |
| 499 *ref_ptr = histogram_ref; | 527 *ref_ptr = histogram_ref; |
| 500 | 528 |
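The allocation path above requests every block first and writes the linking metadata only if all of them exist, because the persistent arena cannot free a partial allocation. A compact sketch of that commit-only-on-success pattern against a toy bump allocator (all names hypothetical):

    #include <cstddef>
    #include <cstdint>

    class ToyArena {               // stand-in for PersistentMemoryAllocator
     public:
      explicit ToyArena(size_t size) : size_(size) {}
      // Returns a nonzero offset ("reference"), or 0 when the arena is full.
      uint32_t Allocate(size_t bytes) {
        if (bytes == 0 || bytes > size_ - used_)
          return 0;
        const uint32_t ref = static_cast<uint32_t>(used_ + 1);
        used_ += bytes;
        return ref;
      }
     private:
      size_t size_;
      size_t used_ = 0;
    };

    // Commit only when every piece was allocated; failed pieces are simply
    // abandoned, since the arena only fails when full or corrupt and later
    // attempts would fail anyway.
    bool CreateRecord(ToyArena* arena, size_t ranges_bytes, size_t counts_bytes) {
      const uint32_t ranges_ref = arena->Allocate(ranges_bytes);
      const uint32_t counts_ref = arena->Allocate(counts_bytes);
      return ranges_ref != 0 && counts_ref != 0;
    }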
| (...skipping 49 matching lines...) |
| 550 scoped_ptr<HistogramBase> histogram = | 578 scoped_ptr<HistogramBase> histogram = |
| 551 g_allocator->GetNextHistogramWithIgnore(&iter, last_created); | 579 g_allocator->GetNextHistogramWithIgnore(&iter, last_created); |
| 552 if (!histogram) | 580 if (!histogram) |
| 553 break; | 581 break; |
| 554 StatisticsRecorder::RegisterOrDeleteDuplicate(histogram.release()); | 582 StatisticsRecorder::RegisterOrDeleteDuplicate(histogram.release()); |
| 555 } | 583 } |
| 556 } | 584 } |
| 557 } | 585 } |
| 558 | 586 |
| 559 } // namespace base | 587 } // namespace base |