Chromium Code Reviews

Side by Side Diff: base/metrics/persistent_histogram_allocator.cc

Issue 1840843004: Improve efficiency of persistent sparse histograms. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@improved-pma-iterator
Patch Set: rebased Created 4 years, 8 months ago
1 // Copyright 2016 The Chromium Authors. All rights reserved. 1 // Copyright 2016 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "base/metrics/persistent_histogram_allocator.h" 5 #include "base/metrics/persistent_histogram_allocator.h"
6 6
7 #include <memory> 7 #include <memory>
8 8
9 #include "base/lazy_instance.h" 9 #include "base/lazy_instance.h"
10 #include "base/logging.h" 10 #include "base/logging.h"
11 #include "base/memory/ptr_util.h" 11 #include "base/memory/ptr_util.h"
12 #include "base/metrics/histogram.h" 12 #include "base/metrics/histogram.h"
13 #include "base/metrics/histogram_base.h" 13 #include "base/metrics/histogram_base.h"
14 #include "base/metrics/histogram_samples.h" 14 #include "base/metrics/histogram_samples.h"
15 #include "base/metrics/persistent_sample_map.h"
15 #include "base/metrics/sparse_histogram.h" 16 #include "base/metrics/sparse_histogram.h"
16 #include "base/metrics/statistics_recorder.h" 17 #include "base/metrics/statistics_recorder.h"
17 #include "base/synchronization/lock.h" 18 #include "base/synchronization/lock.h"
18 19
19 // TODO(bcwhite): Order these methods to match the header file. The current 20 // TODO(bcwhite): Order these methods to match the header file. The current
20 // order is only temporary in order to aid review of the transition from 21 // order is only temporary in order to aid review of the transition from
21 // a non-class implementation. 22 // a non-class implementation.
22 23
23 namespace base { 24 namespace base {
24 25
(...skipping 93 matching lines...)
118 std::unique_ptr<HistogramBase> 119 std::unique_ptr<HistogramBase>
119 PersistentHistogramAllocator::Iterator::GetNextWithIgnore(Reference ignore) { 120 PersistentHistogramAllocator::Iterator::GetNextWithIgnore(Reference ignore) {
120 PersistentMemoryAllocator::Reference ref; 121 PersistentMemoryAllocator::Reference ref;
121 while ((ref = memory_iter_.GetNextOfType(kTypeIdHistogram)) != 0) { 122 while ((ref = memory_iter_.GetNextOfType(kTypeIdHistogram)) != 0) {
122 if (ref != ignore) 123 if (ref != ignore)
123 return allocator_->GetHistogram(ref); 124 return allocator_->GetHistogram(ref);
124 } 125 }
125 return nullptr; 126 return nullptr;
126 } 127 }
127 128
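A minimal usage sketch of GetNextWithIgnore(), mirroring the import loop further down in this file; the function name and the assumption that the Iterator is constructed from the owning allocator are illustrative, not part of this CL:

// Walk every histogram in persistent memory but skip one reference the
// caller already knows about (e.g. a histogram it just created itself).
void ImportAllExcept(PersistentHistogramAllocator* allocator,
                     PersistentHistogramAllocator::Reference already_known) {
  PersistentHistogramAllocator::Iterator iter(allocator);
  std::unique_ptr<HistogramBase> histogram;
  while ((histogram = iter.GetNextWithIgnore(already_known)) != nullptr) {
    // Ownership passes to the StatisticsRecorder, which deletes duplicates.
    StatisticsRecorder::RegisterOrDeleteDuplicate(histogram.release());
  }
}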
129
128 PersistentHistogramAllocator::PersistentHistogramAllocator( 130 PersistentHistogramAllocator::PersistentHistogramAllocator(
129 std::unique_ptr<PersistentMemoryAllocator> memory) 131 std::unique_ptr<PersistentMemoryAllocator> memory)
130 : memory_allocator_(std::move(memory)) {} 132 : memory_allocator_(std::move(memory)) {
133 // Need to allocate this in a thread-safe manner so do it during the
134 // initial construction of the object.
135 sparse_histogram_helper_.reset(
136 new PersistentSparseHistogramHelper(memory_allocator()));
137 }
131 138
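A sketch of the lazy-initialization alternative that the constructor comment above guards against (hypothetical code, not part of this CL): creating the helper on first use would race once the allocator is shared between threads, which is why it is built eagerly in the constructor instead.

// Hypothetical lazy-init variant -- NOT what this CL does. Two threads
// recording to different sparse histograms could both observe a null helper
// and both construct one; a lock or std::call_once would be needed here.
PersistentSampleMapRecords* PersistentHistogramAllocator::GetSampleMapRecords(
    uint64_t id) {
  if (!sparse_histogram_helper_) {  // data race without synchronization
    sparse_histogram_helper_.reset(
        new PersistentSparseHistogramHelper(memory_allocator()));
  }
  return sparse_histogram_helper_->GetSampleMapRecords(id);
}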
132 PersistentHistogramAllocator::~PersistentHistogramAllocator() {} 139 PersistentHistogramAllocator::~PersistentHistogramAllocator() {}
133 140
134 void PersistentHistogramAllocator::CreateTrackingHistograms(StringPiece name) { 141 void PersistentHistogramAllocator::CreateTrackingHistograms(StringPiece name) {
135 memory_allocator_->CreateTrackingHistograms(name); 142 memory_allocator_->CreateTrackingHistograms(name);
136 } 143 }
137 144
138 void PersistentHistogramAllocator::UpdateTrackingHistograms() { 145 void PersistentHistogramAllocator::UpdateTrackingHistograms() {
139 memory_allocator_->UpdateTrackingHistograms(); 146 memory_allocator_->UpdateTrackingHistograms();
140 } 147 }
(...skipping 48 matching lines...)
189 PersistentHistogramData* histogram_data_ptr) { 196 PersistentHistogramData* histogram_data_ptr) {
190 if (!histogram_data_ptr) { 197 if (!histogram_data_ptr) {
191 RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_METADATA_POINTER); 198 RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_METADATA_POINTER);
192 NOTREACHED(); 199 NOTREACHED();
193 return nullptr; 200 return nullptr;
194 } 201 }
195 202
196 // Sparse histograms are quite different so handle them as a special case. 203 // Sparse histograms are quite different so handle them as a special case.
197 if (histogram_data_ptr->histogram_type == SPARSE_HISTOGRAM) { 204 if (histogram_data_ptr->histogram_type == SPARSE_HISTOGRAM) {
198 std::unique_ptr<HistogramBase> histogram = 205 std::unique_ptr<HistogramBase> histogram =
199 SparseHistogram::PersistentCreate(memory_allocator(), 206 SparseHistogram::PersistentCreate(this, histogram_data_ptr->name,
200 histogram_data_ptr->name,
201 &histogram_data_ptr->samples_metadata, 207 &histogram_data_ptr->samples_metadata,
202 &histogram_data_ptr->logged_metadata); 208 &histogram_data_ptr->logged_metadata);
203 DCHECK(histogram); 209 DCHECK(histogram);
204 histogram->SetFlags(histogram_data_ptr->flags); 210 histogram->SetFlags(histogram_data_ptr->flags);
205 RecordCreateHistogramResult(CREATE_HISTOGRAM_SUCCESS); 211 RecordCreateHistogramResult(CREATE_HISTOGRAM_SUCCESS);
206 return histogram; 212 return histogram;
207 } 213 }
208 214
209 // Copy the histogram_data to local storage because anything in persistent 215 // Copy the histogram_data to local storage because anything in persistent
210 // memory cannot be trusted as it could be changed at any moment by a 216 // memory cannot be trusted as it could be changed at any moment by a
(...skipping 125 matching lines...)
336 // be marked as "iterable" in order to be found by other processes. 342 // be marked as "iterable" in order to be found by other processes.
337 if (registered) 343 if (registered)
338 memory_allocator_->MakeIterable(ref); 344 memory_allocator_->MakeIterable(ref);
339 // If it wasn't registered then a race condition must have caused 345 // If it wasn't registered then a race condition must have caused
340 // two to be created. The allocator does not support releasing the 346 // two to be created. The allocator does not support releasing the
341 // acquired memory so just change the type to be empty. 347 // acquired memory so just change the type to be empty.
342 else 348 else
343 memory_allocator_->SetType(ref, 0); 349 memory_allocator_->SetType(ref, 0);
344 } 350 }
345 351
352 PersistentSampleMapRecords* PersistentHistogramAllocator::GetSampleMapRecords(
353 uint64_t id) {
354 return sparse_histogram_helper_->GetSampleMapRecords(id);
355 }
356
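A hypothetical consumer sketch for GetSampleMapRecords(); the real consumer is PersistentSampleMap, and the loop body here is only a placeholder:

// Each sparse histogram's sample map asks the allocator for the record
// collection keyed by its id, then drains any references that other
// processes (or earlier calls) have already persisted for it.
void ImportExistingRecords(PersistentHistogramAllocator* allocator,
                           uint64_t sample_map_id) {
  PersistentSampleMapRecords* records =
      allocator->GetSampleMapRecords(sample_map_id);
  PersistentMemoryAllocator::Reference ref;
  while ((ref = records->GetNext()) != 0) {
    // Attach the referenced sample-count object to the in-memory map
    // (details omitted; see PersistentSampleMap).
  }
}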
346 std::unique_ptr<HistogramBase> PersistentHistogramAllocator::AllocateHistogram( 357 std::unique_ptr<HistogramBase> PersistentHistogramAllocator::AllocateHistogram(
347 HistogramType histogram_type, 358 HistogramType histogram_type,
348 const std::string& name, 359 const std::string& name,
349 int minimum, 360 int minimum,
350 int maximum, 361 int maximum,
351 const BucketRanges* bucket_ranges, 362 const BucketRanges* bucket_ranges,
352 int32_t flags, 363 int32_t flags,
353 Reference* ref_ptr) { 364 Reference* ref_ptr) {
354 // If the allocator is corrupt, don't waste time trying anything else. 365 // If the allocator is corrupt, don't waste time trying anything else.
355 // This also allows differentiating on the dashboard between allocations 366 // This also allows differentiating on the dashboard between allocations
(...skipping 226 matching lines...)
582 // has its own lock so the Register operation is safe. 593 // has its own lock so the Register operation is safe.
583 while (true) { 594 while (true) {
584 std::unique_ptr<HistogramBase> histogram = 595 std::unique_ptr<HistogramBase> histogram =
585 import_iterator_.GetNextWithIgnore(record_to_ignore); 596 import_iterator_.GetNextWithIgnore(record_to_ignore);
586 if (!histogram) 597 if (!histogram)
587 break; 598 break;
588 StatisticsRecorder::RegisterOrDeleteDuplicate(histogram.release()); 599 StatisticsRecorder::RegisterOrDeleteDuplicate(histogram.release());
589 } 600 }
590 } 601 }
591 602
603
604 PersistentSparseHistogramHelper::PersistentSparseHistogramHelper(
605 PersistentMemoryAllocator* allocator)
606 : allocator_(allocator), record_iterator_(allocator) {}
607
608 PersistentSparseHistogramHelper::~PersistentSparseHistogramHelper() {}
609
610 PersistentSampleMapRecords*
611 PersistentSparseHistogramHelper::GetSampleMapRecords(uint64_t id) {
612 base::AutoLock auto_lock(lock_);
613 return GetSampleMapRecordsWhileLocked(id);
614 }
615
616 PersistentSampleMapRecords*
617 PersistentSparseHistogramHelper::GetSampleMapRecordsWhileLocked(uint64_t id) {
618 PersistentSampleMapRecords* samples = sample_records_.Lookup(id);
619 if (samples)
620 return samples;
621
622 samples = new PersistentSampleMapRecords(this, id);
623 sample_records_.AddWithID(samples, id); // Ownership is transferred.
624 return samples;
625 }
626
627 bool PersistentSparseHistogramHelper::LoadRecordsForSampleMapWhileLocked(
628 uint64_t load_id) {
Alexei Svitkine (slow) 2016/04/13 15:53:50 Nit: Add lock_.AssertAcquired()
bcwhite 2016/04/13 22:45:03 Done.
629 // Acquiring a lock is a semi-expensive operation so try to load more than
630 // a single record with each call. More than this number may be loaded if
631 // the next record for the specified histogram is further out.
632 const int kMinimumNumberToLoad = 10;
633 bool found = false;
634
635 for (int count = 0; !found || count < kMinimumNumberToLoad; ++count) {
Alexei Svitkine (slow) 2016/04/13 15:53:50 Nit: I would just remove the found var and return
bcwhite 2016/04/13 22:45:03 For performance reasons, I want to load a minimum
Alexei Svitkine (slow) 2016/04/14 19:30:03 I understand that, I was not suggesting here to re
bcwhite 2016/04/15 02:36:18 That would stop loading when the first match was found.
Alexei Svitkine (slow) 2016/04/15 15:05:07 Ah. I had missed the fact that you're using || in
bcwhite 2016/04/15 18:23:01 Done.
636 // Get the next sample-record.
637 uint64_t sample_map_id;
638 PersistentMemoryAllocator::Reference ref =
639 PersistentSampleMap::GetNextPersistentRecord(record_iterator_,
Alexei Svitkine (slow) 2016/04/13 15:53:50 In the previous code, you had a comment about the
bcwhite 2016/04/13 22:45:03 Done.
640 &sample_map_id);
641
642 // Stop immediately if there are none.
643 if (!ref)
644 break;
645
646 // The sample-record could be for any sparse histogram. Add the reference
647 // to the appropriate collection for later use.
648 PersistentSampleMapRecords* samples =
649 GetSampleMapRecordsWhileLocked(sample_map_id);
650 DCHECK(samples);
651 samples->found_.push_back(ref);
652 if (sample_map_id == load_id)
653 found = true;
654 }
655
656 return found;
657 }
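A stand-alone simulation of the batching policy above, under the assumption that records arrive as a simple list of sample-map ids (the real code reads them from persistent memory via the record iterator):

#include <cstdint>
#include <vector>

// Returns true if a record for |load_id| was seen. At least
// kMinimumNumberToLoad records are consumed per call to amortize the lock,
// and consumption continues past that minimum until |load_id| shows up or
// the records run out -- the same termination rule as the loop above.
bool SimulateLoad(const std::vector<uint64_t>& record_ids,
                  size_t* cursor,
                  uint64_t load_id) {
  const int kMinimumNumberToLoad = 10;
  bool found = false;
  for (int count = 0; !found || count < kMinimumNumberToLoad; ++count) {
    if (*cursor >= record_ids.size())
      break;  // No more records: give up, mirroring the null-reference break.
    uint64_t id = record_ids[(*cursor)++];
    // The real code appends the reference to the per-histogram collection
    // for |id| here; the simulation only tracks whether |load_id| appeared.
    if (id == load_id)
      found = true;
  }
  return found;
}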
658
659
660 PersistentSampleMapRecords::PersistentSampleMapRecords(
661 PersistentSparseHistogramHelper* helper,
662 uint64_t sample_map_id)
663 : helper_(helper), sample_map_id_(sample_map_id) {}
664
665 PersistentSampleMapRecords::~PersistentSampleMapRecords() {}
666
667 void PersistentSampleMapRecords::Reset() {
668 seen_ = 0;
669 }
670
671 PersistentMemoryAllocator::Reference PersistentSampleMapRecords::GetNext() {
672 // If there are no unseen records, lock and swap in all the found ones.
673 if (records_.size() == seen_) {
674 base::AutoLock auto_lock(helper_->lock_);
675
676 // If there are no found records either, load some for this histogram.
677 if (found_.empty()) {
678 if (!helper_->LoadRecordsForSampleMapWhileLocked(sample_map_id_))
Alexei Svitkine (slow) 2016/04/13 15:53:50 So the function you call will give up after not fi
bcwhite 2016/04/13 22:45:03 It loads 10 records. If it hasn't found one for the requested histogram by then, it keeps loading until it does (or the records run out).
Alexei Svitkine (slow) 2016/04/14 19:30:03 Where does it continue? I don't see a loop inside
bcwhite 2016/04/15 02:36:18 "!found" in the "for" condition. LoadRecordsForSampleMapWhileLocked keeps iterating until a record for the requested histogram is found.
679 return 0;
Alexei Svitkine (slow) 2016/04/13 15:53:50 Hmm, can we expose kReferenceNull to be public and use it here instead of a literal 0?
bcwhite 2016/04/13 22:45:03 Done.
680 }
681
682 DCHECK(!found_.empty());
683 records_.reserve(records_.size() + found_.size());
Alexei Svitkine (slow) 2016/04/13 15:53:50 Seems wasteful to copy this stuff twice. How about
bcwhite 2016/04/13 22:45:03 Done but I think this makes the code more confusing.
684 records_.insert(records_.end(), found_.begin(), found_.end());
685 found_.clear();
686 }
687
688 // Return the next record. Records *must* be returned in the same order
689 // they are found in the persistent memory in order to ensure that all
690 // objects using this data always have the same state. Race conditions
691 // can cause duplicate records so using the "first found" is the only
692 // guarantee that all objects always access the same one.
693 DCHECK_LT(seen_, records_.size());
694 return records_[seen_++];
Alexei Svitkine (slow) 2016/04/13 15:53:50 Shouldn't this code also be locked?
bcwhite 2016/04/13 22:45:03 No need. records_ is only ever accessed by a single thread.
Alexei Svitkine (slow) 2016/04/14 19:30:03 I see. So it's only the helper that needs the lock
bcwhite 2016/04/15 02:36:18 Hmmm... Let me think about it. I worry that the
bcwhite 2016/04/15 14:12:57 Done.
695 }
696
697 PersistentMemoryAllocator::Reference PersistentSampleMapRecords::CreateNew(
698 HistogramBase::Sample value) {
699 return PersistentSampleMap::CreatePersistentRecord(helper_->allocator_,
700 sample_map_id_, value);
701 }
702
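A hypothetical caller sketch tying GetNext() and CreateNew() together; the real caller is PersistentSampleMap, and ImportRecord() is an assumed helper, not an existing API:

// Pull every record already persisted for this histogram; if none of them
// corresponds to |value|, allocate a fresh one that other processes can
// later discover through their own GetNext() calls.
PersistentMemoryAllocator::Reference FindOrCreateRecord(
    PersistentSampleMapRecords* records,
    HistogramBase::Sample value) {
  PersistentMemoryAllocator::Reference ref;
  while ((ref = records->GetNext()) != 0) {
    // ImportRecord(ref);  // would attach the count object for its value
  }
  return records->CreateNew(value);
}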
592 } // namespace base 703 } // namespace base
