Index: base/metrics/persistent_histogram_allocator.cc
diff --git a/base/metrics/persistent_histogram_allocator.cc b/base/metrics/persistent_histogram_allocator.cc
index 1d5c0ef0e1c5d90175f8456469b67502f34e1d28..5b45f9042a6b41db8ddbc4b437d12ba542ee0cea 100644
--- a/base/metrics/persistent_histogram_allocator.cc
+++ b/base/metrics/persistent_histogram_allocator.cc
@@ -12,6 +12,7 @@
 #include "base/metrics/histogram.h"
 #include "base/metrics/histogram_base.h"
 #include "base/metrics/histogram_samples.h"
+#include "base/metrics/persistent_sample_map.h"
 #include "base/metrics/sparse_histogram.h"
 #include "base/metrics/statistics_recorder.h"
 #include "base/synchronization/lock.h"
@@ -125,9 +126,15 @@ PersistentHistogramAllocator::Iterator::GetNextWithIgnore(Reference ignore) {
   return nullptr;
 }
+
 PersistentHistogramAllocator::PersistentHistogramAllocator(
     std::unique_ptr<PersistentMemoryAllocator> memory)
-    : memory_allocator_(std::move(memory)) {}
+    : memory_allocator_(std::move(memory)) {
+  // Need to allocate this in a thread-safe manner so do it during the
+  // initial construction of the object.
+  sparse_histogram_helper_.reset(
+      new PersistentSparseHistogramHelper(memory_allocator()));
+}
 PersistentHistogramAllocator::~PersistentHistogramAllocator() {}
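The constructor above populates a sparse_histogram_helper_ member, and a GetSampleMapRecords() accessor is added further down in this diff; both need matching declarations in persistent_histogram_allocator.h, which is not part of this excerpt. A minimal sketch of what those declarations could look like, inferred from the .cc changes; everything beyond the names that already appear in this diff is an assumption:

    // Sketch only; the real header change is not shown in this review excerpt.
    class PersistentHistogramAllocator {
     public:
      // Returns the record-tracker for the sparse histogram identified by |id|.
      PersistentSampleMapRecords* GetSampleMapRecords(uint64_t id);

     private:
      std::unique_ptr<PersistentMemoryAllocator> memory_allocator_;
      // Created during construction so it is ready before any thread uses it.
      std::unique_ptr<PersistentSparseHistogramHelper> sparse_histogram_helper_;
      // ... existing members and methods elided ...
    };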
@@ -196,8 +203,7 @@ std::unique_ptr<HistogramBase> PersistentHistogramAllocator::CreateHistogram(
   // Sparse histograms are quite different so handle them as a special case.
   if (histogram_data_ptr->histogram_type == SPARSE_HISTOGRAM) {
     std::unique_ptr<HistogramBase> histogram =
-        SparseHistogram::PersistentCreate(memory_allocator(),
-                                          histogram_data_ptr->name,
+        SparseHistogram::PersistentCreate(this, histogram_data_ptr->name,
                                           &histogram_data_ptr->samples_metadata,
                                           &histogram_data_ptr->logged_metadata);
     DCHECK(histogram);
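Passing "this" instead of memory_allocator() implies that SparseHistogram::PersistentCreate() now takes the PersistentHistogramAllocator itself. That signature change lives in sparse_histogram.h/.cc rather than in this file, so the following counterpart declaration is only an assumed sketch, with parameter types inferred from the call site above:

    // Assumed shape of the updated factory in sparse_histogram.h (not shown).
    static std::unique_ptr<HistogramBase> PersistentCreate(
        PersistentHistogramAllocator* allocator,
        const std::string& name,
        HistogramSamples::Metadata* meta,
        HistogramSamples::Metadata* logged_meta);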
@@ -343,6 +349,11 @@ void PersistentHistogramAllocator::FinalizeHistogram(Reference ref,
   memory_allocator_->SetType(ref, 0);
 }
+PersistentSampleMapRecords* PersistentHistogramAllocator::GetSampleMapRecords(
+    uint64_t id) {
+  return sparse_histogram_helper_->GetSampleMapRecords(id);
+}
+
 std::unique_ptr<HistogramBase> PersistentHistogramAllocator::AllocateHistogram(
     HistogramType histogram_type,
     const std::string& name,
@@ -589,4 +600,104 @@ void GlobalHistogramAllocator::ImportHistogramsToStatisticsRecorder() {
   }
 }
+
+PersistentSparseHistogramHelper::PersistentSparseHistogramHelper(
+    PersistentMemoryAllocator* allocator)
+    : allocator_(allocator), record_iterator_(allocator) {}
+
+PersistentSparseHistogramHelper::~PersistentSparseHistogramHelper() {}
+
+PersistentSampleMapRecords*
+PersistentSparseHistogramHelper::GetSampleMapRecords(uint64_t id) {
+  base::AutoLock auto_lock(lock_);
+  return GetSampleMapRecordsWhileLocked(id);
+}
+
+PersistentSampleMapRecords*
+PersistentSparseHistogramHelper::GetSampleMapRecordsWhileLocked(uint64_t id) {
+  PersistentSampleMapRecords* samples = sample_records_.Lookup(id);
+  if (samples)
+    return samples;
+
+  samples = new PersistentSampleMapRecords(this, id);
+  sample_records_.AddWithID(samples, id);  // Ownership is transferred.
+  return samples;
+}
+
+bool PersistentSparseHistogramHelper::LoadRecordsForSampleMapWhileLocked(
+    uint64_t load_id) {
Alexei Svitkine (slow), 2016/04/13 15:53:50: Nit: Add lock_.AssertAcquired()
bcwhite, 2016/04/13 22:45:03: Done.
(A hedged sketch applying this nit follows the function body below.)
+  // Acquiring a lock is a semi-expensive operation so try to load more than
+  // a single record with each call. More than this number may be loaded if
+  // the next record for the specified histogram is further out.
+  const int kMinimumNumberToLoad = 10;
+  bool found = false;
+
+  for (int count = 0; !found || count < kMinimumNumberToLoad; ++count) {
Alexei Svitkine (slow), 2016/04/13 15:53:50: Nit: I would just remove the found var and return [...]
bcwhite, 2016/04/13 22:45:03: For performance reasons, I want to load a minimum [...]
Alexei Svitkine (slow), 2016/04/14 19:30:03: I understand that, I was not suggesting here to re[...]
bcwhite, 2016/04/15 02:36:18: That would stop loading when the first match was f[...]
Alexei Svitkine (slow), 2016/04/15 15:05:07: Ah. I had missed the fact that you're using || in [...]
bcwhite, 2016/04/15 18:23:01: Done.
+    // Get the next sample-record.
+    uint64_t sample_map_id;
+    PersistentMemoryAllocator::Reference ref =
+        PersistentSampleMap::GetNextPersistentRecord(record_iterator_,
Alexei Svitkine (slow), 2016/04/13 15:53:50: In the previous code, you had a comment about the [...]
bcwhite, 2016/04/13 22:45:03: Done.
+                                                     &sample_map_id);
+
+    // Stop immediately if there are none.
+    if (!ref)
+      break;
+
+    // The sample-record could be for any sparse histogram. Add the reference
+    // to the appropriate collection for later use.
+    PersistentSampleMapRecords* samples =
+        GetSampleMapRecordsWhileLocked(sample_map_id);
+    DCHECK(samples);
+    samples->found_.push_back(ref);
+    if (sample_map_id == load_id)
+      found = true;
+  }
+
+  return found;
+}
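Pulling the two review threads above together, here is a hedged sketch of how the function might read with the AssertAcquired nit applied and the loop condition spelled out; this is illustrative only, not the landed patchset (lock_.AssertAcquired() is the standard base::Lock debug assertion):

    // Sketch: the function from this diff plus the reviewer's AssertAcquired nit.
    bool PersistentSparseHistogramHelper::LoadRecordsForSampleMapWhileLocked(
        uint64_t load_id) {
      lock_.AssertAcquired();  // Callers must already hold |lock_|.

      // Acquiring a lock is semi-expensive, so load a batch per call.
      const int kMinimumNumberToLoad = 10;
      bool found = false;

      // Loop while no record for |load_id| has been found OR fewer than
      // kMinimumNumberToLoad records have been consumed; stop only once the
      // minimum batch is loaded *and* a matching record was seen, or the
      // iterator runs dry.
      for (int count = 0; !found || count < kMinimumNumberToLoad; ++count) {
        uint64_t sample_map_id;
        PersistentMemoryAllocator::Reference ref =
            PersistentSampleMap::GetNextPersistentRecord(record_iterator_,
                                                         &sample_map_id);
        if (!ref)
          break;  // Nothing left to load right now.

        // File the record under whichever sparse histogram owns it.
        GetSampleMapRecordsWhileLocked(sample_map_id)->found_.push_back(ref);
        if (sample_map_id == load_id)
          found = true;
      }
      return found;
    }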
+
+
+PersistentSampleMapRecords::PersistentSampleMapRecords(
+    PersistentSparseHistogramHelper* helper,
+    uint64_t sample_map_id)
+    : helper_(helper), sample_map_id_(sample_map_id) {}
+
+PersistentSampleMapRecords::~PersistentSampleMapRecords() {}
+
+void PersistentSampleMapRecords::Reset() {
+  seen_ = 0;
+}
+
+PersistentMemoryAllocator::Reference PersistentSampleMapRecords::GetNext() {
+  // If there are no unseen records, lock and swap in all the found ones.
+  if (records_.size() == seen_) {
+    base::AutoLock auto_lock(helper_->lock_);
+
+    // If there are no found records either, load some for this histogram.
+    if (found_.empty()) {
+      if (!helper_->LoadRecordsForSampleMapWhileLocked(sample_map_id_))
Alexei Svitkine (slow), 2016/04/13 15:53:50: So the function you call will give up after not fi[...]
bcwhite, 2016/04/13 22:45:03: It loads 10 records. If it hasn't found one for t[...]
Alexei Svitkine (slow), 2016/04/14 19:30:03: Where does it continue? I don't see a loop inside [...]
bcwhite, 2016/04/15 02:36:18: "!found" in the "for" condition. LoadRecordsForSa[...]
+        return 0;
Alexei Svitkine (slow), 2016/04/13 15:53:50: Hmm, can we expose kReferenceNull to be public and [...]
bcwhite, 2016/04/13 22:45:03: Done.
+    }
+
+    DCHECK(!found_.empty());
+    records_.reserve(records_.size() + found_.size());
Alexei Svitkine (slow), 2016/04/13 15:53:50: Seems wasteful to copy this stuff twice. How about [...]
bcwhite, 2016/04/13 22:45:03: Done but I think this makes the code more confusin[...]
+    records_.insert(records_.end(), found_.begin(), found_.end());
+    found_.clear();
+  }
+
+  // Return the next record. Records *must* be returned in the same order
+  // they are found in the persistent memory in order to ensure that all
+  // objects using this data always have the same state. Race conditions
+  // can cause duplicate records so using the "first found" is the only
+  // guarantee that all objects always access the same one.
+  DCHECK_LT(seen_, records_.size());
+  return records_[seen_++];
Alexei Svitkine (slow), 2016/04/13 15:53:50: Shouldn't this code also be locked?
bcwhite, 2016/04/13 22:45:03: No need. records_ is only ever accessed by a sing[...]
Alexei Svitkine (slow), 2016/04/14 19:30:03: I see. So it's only the helper that needs the lock [...]
bcwhite, 2016/04/15 02:36:18: Hmmm... Let me think about it. I worry that the [...]
bcwhite, 2016/04/15 14:12:57: Done.
+}
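For orientation, this is roughly how a sparse histogram's PersistentSampleMap would drive the API above. The PersistentSampleMap side changes in a separate file, so apart from GetSampleMapRecords(), GetNext() and CreateNew() (the last defined just below), the surrounding names here are placeholders:

    // Illustrative consumer only; |allocator|, |id| and |value| are placeholders.
    PersistentSampleMapRecords* records = allocator->GetSampleMapRecords(id);

    // Import every record currently available for this histogram. GetNext()
    // returns 0 (the null reference) once nothing more can be loaded.
    PersistentMemoryAllocator::Reference ref;
    while ((ref = records->GetNext()) != 0) {
      // ... attach the counter stored at |ref| to the in-memory sample map ...
    }

    // A sample value with no backing record yet gets one created in the
    // persistent memory, where other processes can later discover it.
    PersistentMemoryAllocator::Reference new_ref = records->CreateNew(value);

Reset() rewinds the seen_ cursor so the records_ list can be replayed from the start, which is why GetNext() retains every reference it has already handed out.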
+
+PersistentMemoryAllocator::Reference PersistentSampleMapRecords::CreateNew(
+    HistogramBase::Sample value) {
+  return PersistentSampleMap::CreatePersistentRecord(helper_->allocator_,
+                                                     sample_map_id_, value);
+}
+
 }  // namespace base
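Only the .cc half of the change appears in this excerpt. Pieced together from the members used above, the two new classes would need header declarations along these lines; the container behind sample_records_ and the exact field types are assumptions, not the actual persistent_histogram_allocator.h change:

    // Sketch of the implied header declarations (assumed, not from this CL).
    #include <stdint.h>

    #include <map>
    #include <memory>
    #include <vector>

    #include "base/metrics/histogram_base.h"
    #include "base/metrics/persistent_memory_allocator.h"
    #include "base/synchronization/lock.h"

    namespace base {

    class PersistentSampleMapRecords;

    class PersistentSparseHistogramHelper {
     public:
      explicit PersistentSparseHistogramHelper(
          PersistentMemoryAllocator* allocator);
      ~PersistentSparseHistogramHelper();

      // Returns (creating on first use) the record-tracker for |id|.
      PersistentSampleMapRecords* GetSampleMapRecords(uint64_t id);

     private:
      friend class PersistentSampleMapRecords;  // Uses lock_ and allocator_.

      PersistentSampleMapRecords* GetSampleMapRecordsWhileLocked(uint64_t id);
      bool LoadRecordsForSampleMapWhileLocked(uint64_t load_id);

      PersistentMemoryAllocator* allocator_;
      PersistentMemoryAllocator::Iterator record_iterator_;
      base::Lock lock_;
      // The .cc uses Lookup()/AddWithID(), suggesting an id-keyed owning map;
      // a plain std::map stands in for the real container here.
      std::map<uint64_t, std::unique_ptr<PersistentSampleMapRecords>>
          sample_records_;
    };

    class PersistentSampleMapRecords {
     public:
      PersistentSampleMapRecords(PersistentSparseHistogramHelper* helper,
                                 uint64_t sample_map_id);
      ~PersistentSampleMapRecords();

      void Reset();
      PersistentMemoryAllocator::Reference GetNext();
      PersistentMemoryAllocator::Reference CreateNew(HistogramBase::Sample value);

     private:
      friend class PersistentSparseHistogramHelper;  // Appends to found_.

      PersistentSparseHistogramHelper* helper_;
      const uint64_t sample_map_id_;
      size_t seen_ = 0;  // Index of the next unreturned entry in records_.
      std::vector<PersistentMemoryAllocator::Reference> records_;
      std::vector<PersistentMemoryAllocator::Reference> found_;
    };

    }  // namespace base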