| Index: base/metrics/histogram_samples.cc
|
| diff --git a/base/metrics/histogram_samples.cc b/base/metrics/histogram_samples.cc
|
| index 992617ba87c880916b9eb3964e212320d4133cad..9e86a65258d1010a5b629163f56108e6f7e0748f 100644
|
| --- a/base/metrics/histogram_samples.cc
|
| +++ b/base/metrics/histogram_samples.cc
|
| @@ -4,13 +4,26 @@
|
|
|
| #include "base/metrics/histogram_samples.h"
|
|
|
| +#include <limits>
|
| +
|
| #include "base/compiler_specific.h"
|
| +#include "base/numerics/safe_math.h"
|
| #include "base/pickle.h"
|
|
|
| namespace base {
|
|
|
| namespace {
|
|
|
| +// A shorthand constant for the max value of size_t.
|
| +constexpr size_t kSizeMax = std::numeric_limits<size_t>::max();
|
| +
|
| +// A constant stored in an AtomicSingleSample (as_atomic) to indicate that the
|
| +// sample is "disabled" and no further accumulation should be done with it. The
|
| +// value is chosen such that it will be MAX_UINT16 for both |bucket| & |count|,
|
| +// and thus less likely to conflict with real use. Conflicts are explicitly
|
| +// handled in the code but it's worth making them as unlikely as possible.
|
| +constexpr int32_t kDisabledSingleSample = -1;
|
| +
|
| class SampleCountPickleIterator : public SampleCountIterator {
|
| public:
|
| explicit SampleCountPickleIterator(PickleIterator* iter);
|
| @@ -59,6 +72,97 @@ void SampleCountPickleIterator::Get(HistogramBase::Sample* min,
|
|
|
| } // namespace
|
|
|
| +static_assert(sizeof(HistogramSamples::AtomicSingleSample) ==
|
| + sizeof(subtle::Atomic32),
|
| + "AtomicSingleSample isn't 32 bits");
|
| +
|
| +HistogramSamples::SingleSample HistogramSamples::AtomicSingleSample::Load()
|
| + const {
|
| + AtomicSingleSample single_sample = subtle::Acquire_Load(&as_atomic);
|
| +
|
| + // If the sample was extracted/disabled, it's still zero to the outside.
|
| + if (single_sample.as_atomic == kDisabledSingleSample)
|
| + single_sample.as_atomic = 0;
|
| +
|
| + return single_sample.as_parts;
|
| +}
|
| +
|
| +HistogramSamples::SingleSample HistogramSamples::AtomicSingleSample::Extract(
|
| + bool disable) {
|
| + AtomicSingleSample single_sample = subtle::NoBarrier_AtomicExchange(
|
| + &as_atomic, disable ? kDisabledSingleSample : 0);
|
| + if (single_sample.as_atomic == kDisabledSingleSample)
|
| + single_sample.as_atomic = 0;
|
| + return single_sample.as_parts;
|
| +}
|
| +
|
| +bool HistogramSamples::AtomicSingleSample::Accumulate(
|
| + size_t bucket,
|
| + HistogramBase::Count count) {
|
| + if (count == 0)
|
| + return true;
|
| +
|
| + // Convert the parameters to 16-bit variables because it's all 16-bit below.
|
| + if (count < std::numeric_limits<uint16_t>::min() ||
|
| + count > std::numeric_limits<uint16_t>::max() ||
|
| + bucket > std::numeric_limits<uint16_t>::max()) {
|
| + return false;
|
| + }
|
| + uint16_t bucket16 = static_cast<uint16_t>(bucket);
|
| + uint16_t count16 = static_cast<uint16_t>(count);
|
| +
|
| + // A local, unshared copy of the single-sample is necessary so the parts
|
| + // can be manipulated without worrying about atomicity.
|
| + AtomicSingleSample single_sample;
|
| +
|
| + bool sample_updated;
|
| + do {
|
| + subtle::Atomic32 original = subtle::Acquire_Load(&as_atomic);
|
| + if (original == kDisabledSingleSample)
|
| + return false;
|
| + single_sample.as_atomic = original;
|
| + if (single_sample.as_atomic != 0) {
|
| + // Only the same bucket (parameter and stored) can be counted multiple
|
| + // times.
|
| + if (single_sample.as_parts.bucket != bucket16)
|
| + return false;
|
| + } else {
|
| +      // The |single_sample| was zero so becomes the |bucket| parameter, the
|
| + // contents of which were checked above to fit in 16 bits.
|
| + single_sample.as_parts.bucket = bucket16;
|
| + }
|
| +
|
| + // Update count, making sure that it doesn't overflow.
|
| + CheckedNumeric<uint16_t> new_count(single_sample.as_parts.count);
|
| + new_count += count16;
|
| + if (!new_count.AssignIfValid(&single_sample.as_parts.count))
|
| + return false;
|
| +
|
| + // Don't let this become equivalent to the "disabled" value.
|
| + if (single_sample.as_atomic == kDisabledSingleSample)
|
| + return false;
|
| +
|
| + // Store the updated single-sample back into memory. |existing| is what
|
| + // was in that memory location at the time of the call; if it doesn't
|
| + // match |original| then the swap didn't happen so loop again.
|
| + subtle::Atomic32 existing = subtle::Release_CompareAndSwap(
|
| + &as_atomic, original, single_sample.as_atomic);
|
| + sample_updated = (existing == original);
|
| + } while (!sample_updated);
|
| +
|
| + return true;
|
| +}
|
| +
|
| +bool HistogramSamples::AtomicSingleSample::IsDisabled() const {
|
| + return subtle::Acquire_Load(&as_atomic) == kDisabledSingleSample;
|
| +}
|
| +
|
| +HistogramSamples::LocalMetadata::LocalMetadata() {
|
| + // This is the same way it's done for persistent metadata since no ctor
|
| + // is called for the data members in that case.
|
| + memset(this, 0, sizeof(*this));
|
| +}
|
| +
|
| // Don't try to delegate behavior to the constructor below that accepts a
|
| // Matadata pointer by passing &local_meta_. Such cannot be reliably passed
|
| // because it has not yet been constructed -- no member variables have; the
|
| @@ -83,9 +187,7 @@ HistogramSamples::HistogramSamples(uint64_t id, Metadata* meta)
|
| HistogramSamples::~HistogramSamples() {}
|
|
|
| void HistogramSamples::Add(const HistogramSamples& other) {
|
| - IncreaseSum(other.sum());
|
| - subtle::NoBarrier_AtomicIncrement(&meta_->redundant_count,
|
| - other.redundant_count());
|
| + IncreaseSumAndCount(other.sum(), other.redundant_count());
|
| std::unique_ptr<SampleCountIterator> it = other.Iterator();
|
| bool success = AddSubtractImpl(it.get(), ADD);
|
| DCHECK(success);
|
| @@ -98,18 +200,14 @@ bool HistogramSamples::AddFromPickle(PickleIterator* iter) {
|
| if (!iter->ReadInt64(&sum) || !iter->ReadInt(&redundant_count))
|
| return false;
|
|
|
| - IncreaseSum(sum);
|
| - subtle::NoBarrier_AtomicIncrement(&meta_->redundant_count,
|
| - redundant_count);
|
| + IncreaseSumAndCount(sum, redundant_count);
|
|
|
| SampleCountPickleIterator pickle_iter(iter);
|
| return AddSubtractImpl(&pickle_iter, ADD);
|
| }
|
|
|
| void HistogramSamples::Subtract(const HistogramSamples& other) {
|
| - IncreaseSum(-other.sum());
|
| - subtle::NoBarrier_AtomicIncrement(&meta_->redundant_count,
|
| - -other.redundant_count());
|
| + IncreaseSumAndCount(-other.sum(), -other.redundant_count());
|
| std::unique_ptr<SampleCountIterator> it = other.Iterator();
|
| bool success = AddSubtractImpl(it.get(), SUBTRACT);
|
| DCHECK(success);
|
| @@ -135,16 +233,25 @@ bool HistogramSamples::Serialize(Pickle* pickle) const {
|
| return true;
|
| }
|
|
|
| -void HistogramSamples::IncreaseSum(int64_t diff) {
|
| +bool HistogramSamples::AccumulateSingleSample(HistogramBase::Sample value,
|
| + HistogramBase::Count count,
|
| + size_t bucket) {
|
| + if (single_sample().Accumulate(bucket, count)) {
|
| + // Success. Update the (separate) sum and redundant-count.
|
| + IncreaseSumAndCount(static_cast<int64_t>(value) * count, count);
|
| + return true;
|
| + }
|
| + return false;
|
| +}
|
| +
|
| +void HistogramSamples::IncreaseSumAndCount(int64_t sum,
|
| + HistogramBase::Count count) {
|
| #ifdef ARCH_CPU_64_BITS
|
| - subtle::NoBarrier_AtomicIncrement(&meta_->sum, diff);
|
| + subtle::NoBarrier_AtomicIncrement(&meta_->sum, sum);
|
| #else
|
| - meta_->sum += diff;
|
| + meta_->sum += sum;
|
| #endif
|
| -}
|
| -
|
| -void HistogramSamples::IncreaseRedundantCount(HistogramBase::Count diff) {
|
| - subtle::NoBarrier_AtomicIncrement(&meta_->redundant_count, diff);
|
| + subtle::NoBarrier_AtomicIncrement(&meta_->redundant_count, count);
|
| }
|
|
|
| SampleCountIterator::~SampleCountIterator() {}
|
| @@ -154,4 +261,46 @@ bool SampleCountIterator::GetBucketIndex(size_t* index) const {
|
| return false;
|
| }
|
|
|
| +SingleSampleIterator::SingleSampleIterator(HistogramBase::Sample min,
|
| + HistogramBase::Sample max,
|
| + HistogramBase::Count count)
|
| + : SingleSampleIterator(min, max, count, kSizeMax) {}
|
| +
|
| +SingleSampleIterator::SingleSampleIterator(HistogramBase::Sample min,
|
| + HistogramBase::Sample max,
|
| + HistogramBase::Count count,
|
| + size_t bucket_index)
|
| + : min_(min), max_(max), bucket_index_(bucket_index), count_(count) {}
|
| +
|
| +SingleSampleIterator::~SingleSampleIterator() {}
|
| +
|
| +bool SingleSampleIterator::Done() const {
|
| + return count_ == 0;
|
| +}
|
| +
|
| +void SingleSampleIterator::Next() {
|
| + DCHECK(!Done());
|
| + count_ = 0;
|
| +}
|
| +
|
| +void SingleSampleIterator::Get(HistogramBase::Sample* min,
|
| + HistogramBase::Sample* max,
|
| + HistogramBase::Count* count) const {
|
| + DCHECK(!Done());
|
| + if (min != nullptr)
|
| + *min = min_;
|
| + if (max != nullptr)
|
| + *max = max_;
|
| + if (count != nullptr)
|
| + *count = count_;
|
| +}
|
| +
|
| +bool SingleSampleIterator::GetBucketIndex(size_t* index) const {
|
| + DCHECK(!Done());
|
| + if (bucket_index_ == kSizeMax)
|
| + return false;
|
| + *index = bucket_index_;
|
| + return true;
|
| +}
|
| +
|
| } // namespace base
|
|
|