Index: base/metrics/persistent_sample_map.cc
diff --git a/base/metrics/persistent_sample_map.cc b/base/metrics/persistent_sample_map.cc
index 3bd447a533dbb7a305439369f3adb4ab06d7bdf3..cc20f45412bec2160f62dfb7aa9380e6b9a4caca 100644
--- a/base/metrics/persistent_sample_map.cc
+++ b/base/metrics/persistent_sample_map.cc
@@ -6,6 +6,7 @@

 #include "base/logging.h"
 #include "base/memory/ptr_util.h"
+#include "base/metrics/persistent_histogram_allocator.h"
 #include "base/stl_util.h"

 namespace base {
@@ -93,19 +94,25 @@ const uint32_t kTypeIdSampleRecord = 0x8FE6A69F + 1; // SHA1(SampleRecord) v1

 PersistentSampleMap::PersistentSampleMap(
     uint64_t id,
-    PersistentMemoryAllocator* allocator,
+    PersistentHistogramAllocator* allocator,
     Metadata* meta)
-    : HistogramSamples(id, meta),
-      allocator_(allocator),
-      sample_iter_(allocator) {
-  // Load all existing samples during construction. It's no worse to do it
-  // here than at some point in the future and could be better if construction
-  // takes place on some background thread. New samples could be created at
-  // any time by parallel threads; if so, they'll get loaded when needed.
-  ImportSamples(kAllSamples);
-}
+    : PersistentSampleMap(id, allocator->UseSampleMapRecords(id, this), meta) {}

-PersistentSampleMap::~PersistentSampleMap() {}
+PersistentSampleMap::PersistentSampleMap(
+    uint64_t id,
+    PersistentSparseHistogramDataManager* manager,
+    Metadata* meta)
+    : PersistentSampleMap(id, manager->UseSampleMapRecords(id, this), meta) {}
+
+PersistentSampleMap::PersistentSampleMap(
+    uint64_t id,
+    PersistentSampleMapRecords* records,
+    Metadata* meta)
+    : HistogramSamples(id, meta), records_(records) {}
+
+PersistentSampleMap::~PersistentSampleMap() {
+  records_->Release(this);
+}

 void PersistentSampleMap::Accumulate(Sample value, Count count) {
   *GetOrCreateSampleCountStorage(value) += count;
@@ -140,6 +147,46 @@ std::unique_ptr<SampleCountIterator> PersistentSampleMap::Iterator() const {
   return WrapUnique(new PersistentSampleMapIterator(sample_counts_));
 }

+// static
+PersistentMemoryAllocator::Reference
+PersistentSampleMap::GetNextPersistentRecord(
+    PersistentMemoryAllocator::Iterator& iterator,
+    uint64_t* sample_map_id) {
+  PersistentMemoryAllocator::Reference ref =
+      iterator.GetNextOfType(kTypeIdSampleRecord);
+  const SampleRecord* record =
+      iterator.GetAsObject<SampleRecord>(ref, kTypeIdSampleRecord);
+  if (!record)
+    return 0;
+
+  *sample_map_id = record->id;
+  return ref;
+}
+
+// static
+PersistentMemoryAllocator::Reference
+PersistentSampleMap::CreatePersistentRecord(
+    PersistentMemoryAllocator* allocator,
+    uint64_t sample_map_id,
+    Sample value) {
+  PersistentMemoryAllocator::Reference ref =
+      allocator->Allocate(sizeof(SampleRecord), kTypeIdSampleRecord);
+  SampleRecord* record =
+      allocator->GetAsObject<SampleRecord>(ref, kTypeIdSampleRecord);
+
+  if (!record) {
+    NOTREACHED() << "full=" << allocator->IsFull()
+                 << ", corrupt=" << allocator->IsCorrupt();
+    return 0;
+  }
+
+  record->id = sample_map_id;
+  record->value = value;
+  record->count = 0;
+  allocator->MakeIterable(ref);
+  return ref;
+}
+
 bool PersistentSampleMap::AddSubtractImpl(SampleCountIterator* iter,
                                           Operator op) {
   Sample min;
@@ -175,25 +222,16 @@ Count* PersistentSampleMap::GetOrCreateSampleCountStorage(Sample value) {
     return count_pointer;

   // Create a new record in persistent memory for the value.
-  PersistentMemoryAllocator::Reference ref =
-      allocator_->Allocate(sizeof(SampleRecord), kTypeIdSampleRecord);
-  SampleRecord* record =
-      allocator_->GetAsObject<SampleRecord>(ref, kTypeIdSampleRecord);
-  if (!record) {
-    // If the allocator was unable to create a record then it is full or
-    // corrupt. Instead, allocate the counter from the heap. This sample will
-    // not be persistent, will not be shared, and will leak but it's better
-    // than crashing.
-    NOTREACHED() << "full=" << allocator_->IsFull()
-                 << ", corrupt=" << allocator_->IsCorrupt();
+  PersistentMemoryAllocator::Reference ref = records_->CreateNew(value);
+  if (!ref) {
+    // If a new record could not be created then the underlying allocator is
+    // full or corrupt. Instead, allocate the counter from the heap. This
+    // sample will not be persistent, will not be shared, and will leak...
+    // but it's better than crashing.
     count_pointer = new Count(0);
     sample_counts_[value] = count_pointer;
     return count_pointer;
   }
-  record->id = id();
-  record->value = value;
-  record->count = 0; // Should already be zero but don't trust other processes.
-  allocator_->MakeIterable(ref);

   // A race condition between two independent processes (i.e. two independent
   // histogram objects sharing the same sample data) could cause two of the
@@ -210,34 +248,14 @@ Count* PersistentSampleMap::GetOrCreateSampleCountStorage(Sample value) {
 }

 Count* PersistentSampleMap::ImportSamples(Sample until_value) {
-  // TODO(bcwhite): This import operates in O(V+N) total time per sparse
-  // histogram where V is the number of values for this object and N is
-  // the number of other iterable objects in the allocator. This becomes
-  // O(S*(SV+N)) or O(S^2*V + SN) overall where S is the number of sparse
-  // histograms.
-  //
-  // This is actually okay when histograms are expected to exist for the
-  // lifetime of the program, spreading the cost out, and S and V are
-  // relatively small, as is the current case.
-  //
-  // However, it is not so good for objects that are created, detroyed, and
-  // recreated on a periodic basis, such as when making a snapshot of
-  // sparse histograms owned by another, ongoing process. In that case, the
-  // entire cost is compressed into a single sequential operation... on the
-  // UI thread no less.
-  //
-  // This will be addressed in a future CL.
-
   PersistentMemoryAllocator::Reference ref;
-  while ((ref = sample_iter_.GetNextOfType(kTypeIdSampleRecord)) != 0) {
+  while ((ref = records_->GetNext()) != 0) {
     SampleRecord* record =
-        allocator_->GetAsObject<SampleRecord>(ref, kTypeIdSampleRecord);
+        records_->GetAsObject<SampleRecord>(ref, kTypeIdSampleRecord);
     if (!record)
       continue;

-    // A sample record has been found but may not be for this histogram.
-    if (record->id != id())
-      continue;
+    DCHECK_EQ(id(), record->id);

     // Check if the record's value is already known.
     if (!ContainsKey(sample_counts_, record->value)) {
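
For context, a minimal sketch of how the reworked constructor chain can be driven after this change. It is illustrative only and not part of the patch: the allocator setup is simplified, and the function name SampleMapSketch, the 64 KiB size, allocator id 0, empty name, histogram id 1234, and sample value 42 are arbitrary placeholders.

#include <memory>

#include "base/memory/ptr_util.h"
#include "base/metrics/histogram_samples.h"
#include "base/metrics/persistent_histogram_allocator.h"
#include "base/metrics/persistent_memory_allocator.h"
#include "base/metrics/persistent_sample_map.h"

void SampleMapSketch() {
  // Back a histogram allocator with process-local persistent memory
  // (size/id/name are placeholder values).
  base::PersistentHistogramAllocator allocator(base::WrapUnique(
      new base::LocalPersistentMemoryAllocator(64 << 10, 0, "")));

  base::HistogramSamples::Metadata meta;
  // The public constructor now takes the histogram allocator and delegates,
  // via UseSampleMapRecords(), to the records-based constructor.
  base::PersistentSampleMap samples(/*id=*/1234, &allocator, &meta);
  samples.Accumulate(/*value=*/42, /*count=*/1);
}  // ~PersistentSampleMap() releases the shared sample-map records.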