Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(134)

Side by Side Diff: base/metrics/persistent_sample_map.cc

Issue 1734033003: Add support for persistent sparse histograms. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: merge common code for sparse/regular histogram creation Created 4 years, 9 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
(Empty)
1 // Copyright (c) 2016 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "base/metrics/persistent_sample_map.h"
6
7 #include "base/logging.h"
8 #include "base/stl_util.h"
9
10 namespace base {
11
12 typedef HistogramBase::Count Count;
13 typedef HistogramBase::Sample Sample;
14
namespace {

// This structure holds an entry for a PersistentSampleMap within a persistent
// memory allocator. The "id" must be unique across all maps held by an
// allocator or they will get attached to the wrong sample map. Records live
// in persistent memory and, once made iterable (see MakeIterable below), are
// discoverable via ImportSamples() by any user attached to the same
// allocator; nothing in this file ever frees one.
struct SampleRecord {
  uint64_t id;   // Unique identifier of owner.
  Sample value;  // The value for which this record holds a count.
  Count count;   // The count associated with the above value.
};

// The type-id used to identify sample records inside an allocator. Bumping
// the trailing version number invalidates records written by older code.
const uint32_t kTypeIdSampleRecord = 0x8FE6A69F + 1;  // SHA1(SampleRecord) v1

}  // namespace
30
31 PersistentSampleMap::PersistentSampleMap(
32 uint64_t id,
33 PersistentMemoryAllocator* allocator,
34 Metadata* meta)
35 : HistogramSamples(id, meta),
36 allocator_(allocator) {
37 // This is created once but will continue to return new iterables even when
38 // it has previously reached the end.
39 allocator->CreateIterator(&sample_iter_);
40
41 // Load all existing samples during construction. It's no worse to do it
42 // here than at some point in the future and could be better if construction
43 // takes place on some background thread. New samples could be created at
44 // any time by parallel threads; if so, they'll get loaded when needed.
45 ImportSamples(kAllSamples);
46 }
47
48 PersistentSampleMap::~PersistentSampleMap() {}
49
50 void PersistentSampleMap::Accumulate(Sample value, Count count) {
51 *GetOrCreateSampleCountStorage(value) += count;
52 IncreaseSum(static_cast<int64_t>(count) * value);
53 IncreaseRedundantCount(count);
54 }
55
56 Count PersistentSampleMap::GetCount(Sample value) const {
57 // Have to override "const" to make sure all samples have been loaded before
58 // being able to know what value to return.
59 Count* count_pointer =
60 const_cast<PersistentSampleMap*>(this)->GetSampleCountStorage(value);
61 return count_pointer ? *count_pointer : 0;
62 }
63
64 Count PersistentSampleMap::TotalCount() const {
65 // Have to override "const" in order to make sure all samples have been
66 // loaded before trying to iterate over the map.
67 const_cast<PersistentSampleMap*>(this)->ImportSamples(kAllSamples);
68
69 Count count = 0;
70 for (const auto& entry : sample_counts_)
71 count += *entry.second;
72 return count;
73 }
74
75 scoped_ptr<SampleCountIterator> PersistentSampleMap::Iterator() const {
76 // Have to override "const" in order to make sure all samples have been
77 // loaded before trying to iterate over the map.
78 const_cast<PersistentSampleMap*>(this)->ImportSamples(kAllSamples);
79 return make_scoped_ptr(new PersistentSampleMapIterator(sample_counts_));
80 }
81
82 bool PersistentSampleMap::AddSubtractImpl(SampleCountIterator* iter,
83 Operator op) {
84 Sample min;
85 Sample max;
86 Count count;
87 for (; !iter->Done(); iter->Next()) {
88 iter->Get(&min, &max, &count);
89 if (min + 1 != max)
90 return false; // SparseHistogram only supports bucket with size 1.
91
92 *GetOrCreateSampleCountStorage(min) +=
93 (op == HistogramSamples::ADD) ? count : -count;
94 }
95 return true;
96 }
97
98 Count* PersistentSampleMap::GetSampleCountStorage(Sample value) {
99 DCHECK_LE(0, value);
100
101 // If |value| is already in the map, just return that.
102 auto it = sample_counts_.find(value);
103 if (it != sample_counts_.end())
104 return it->second;
105
106 // Import any new samples from persistent memory looking for the value.
107 return ImportSamples(value);
108 }
109
110 Count* PersistentSampleMap::GetOrCreateSampleCountStorage(Sample value) {
111 // Get any existing count storage.
112 Count* count_pointer = GetSampleCountStorage(value);
113 if (count_pointer)
114 return count_pointer;
115
116 // Create a new record in persistent memory for the value.
117 PersistentMemoryAllocator::Reference ref =
118 allocator_->Allocate(sizeof(SampleRecord), kTypeIdSampleRecord);
119 SampleRecord* record =
120 allocator_->GetAsObject<SampleRecord>(ref, kTypeIdSampleRecord);
121 if (!record) {
122 // If the allocator was unable to create a record then it is full or
123 // corrupt. Instead, allocate the counter from the heap. This sample will
124 // not be persistent, will not be shared, and will leak but it's better
125 // than crashing.
126 NOTREACHED() << "full=" << allocator_->IsFull()
127 << ", corrupt=" << allocator_->IsCorrupt();
128 count_pointer = new Count(0);
Alexei Svitkine (slow) 2016/03/08 19:46:25 Who owns this? Doesn't seem like the map does. Is
bcwhite 2016/03/09 01:16:52 It does, just above.
129 sample_counts_[value] = count_pointer;
130 return count_pointer;
131 }
132 record->id = id();
133 record->value = value;
134 record->count = 0; // Should already be zero but don't trust other processes.
135 allocator_->MakeIterable(ref);
136
137 // A race condition could cause two of the above records to be created. The
Alexei Svitkine (slow) 2016/03/08 19:46:25 If there could be a race - how is this code in gen
bcwhite 2016/03/09 01:16:52 The race is between processes that share the histo
138 // allocator, however, forces a strict ordering on iterable objects so use
139 // the import method to actually add the just-created record. This ensures
140 // that all PersistentSampleMap objects will always use the same record,
141 // whichever was first made iterable.
142 count_pointer = ImportSamples(value);
143 DCHECK(count_pointer);
144 return count_pointer;
145 }
146
147 Count* PersistentSampleMap::ImportSamples(Sample until_value) {
148 // TODO(bcwhite): This import operates in O(V+N) total time per sparse
149 // histogram where V is the number of values for this object and N is
150 // the number of other iterable objects in the allocator. This becomes
151 // O(S*(SV+N)) or O(S^2*V + SN) overall where S is the number of sparse
152 // histograms.
153 //
154 // This is actually okay when histograms are expected to exist for the
155 // lifetime of the program, spreading the cost out, and S and V are
156 // relatively small, as is the current case.
157 //
158 // However, it is not so good for objects that are created, detroyed, and
159 // recreated on a periodic basis, such as when making a snapshot of
160 // sparse histograms owned by another, ongoing process. In that case, the
161 // entire cost is compressed into a single sequential operation... on the
162 // UI thread no less.
163 //
164 // This will be addressed in a future CL.
165
166 uint32_t type_id;
167 PersistentMemoryAllocator::Reference ref;
168 while ((ref = allocator_->GetNextIterable(&sample_iter_, &type_id)) != 0) {
Alexei Svitkine (slow) 2016/03/08 19:46:25 I think it would be useful to add GetNextIterableO
bcwhite 2016/03/09 01:16:52 Perhaps. I have some ideas for changing the itera
169 if (type_id == kTypeIdSampleRecord) {
170 SampleRecord* record =
171 allocator_->GetAsObject<SampleRecord>(ref, kTypeIdSampleRecord);
172 if (!record)
173 continue;
174
175 // A sample record has been found but may not be for this histogram.
176 if (record->id != id())
177 continue;
178
179 // Check if the record's value is already known.
180 if (!ContainsKey(sample_counts_, record->value)) {
181 // No: Add it to map of known values if the value is valid.
182 if (record->value >= 0)
183 sample_counts_[record->value] = &record->count;
184 } else {
185 // Yes: Ignore it; it's a duplicate caused by a race condition.
186 DCHECK_EQ(0, record->count); // Duplicate record should never be used.
Alexei Svitkine (slow) 2016/03/08 19:46:25 Expand comment to mention "see the code in GetOrCr
bcwhite 2016/03/09 01:16:52 Done.
187 }
188
189 // Stop if it's the value being searched for.
190 if (record->value == until_value)
191 return &record->count;
192 }
193 }
194
195 return nullptr;
196 }
197
198 PersistentSampleMapIterator::PersistentSampleMapIterator(
199 const SampleToCountMap& sample_counts)
200 : iter_(sample_counts.begin()),
201 end_(sample_counts.end()) {
202 SkipEmptyBuckets();
203 }
204
205 PersistentSampleMapIterator::~PersistentSampleMapIterator() {}
206
207 bool PersistentSampleMapIterator::Done() const {
208 return iter_ == end_;
209 }
210
211 void PersistentSampleMapIterator::Next() {
212 DCHECK(!Done());
213 ++iter_;
214 SkipEmptyBuckets();
215 }
216
217 void PersistentSampleMapIterator::Get(Sample* min,
218 Sample* max,
219 Count* count) const {
220 DCHECK(!Done());
221 if (min)
222 *min = iter_->first;
223 if (max)
224 *max = iter_->first + 1;
225 if (count)
226 *count = *iter_->second;
227 }
228
229 void PersistentSampleMapIterator::SkipEmptyBuckets() {
230 while (!Done() && *iter_->second == 0)
Alexei Svitkine (slow) 2016/03/08 19:46:25 Nit: For loops, prefer {}'s even for one liners.
bcwhite 2016/03/09 01:16:52 Greg specifically asked me to remove them.
Alexei Svitkine (slow) 2016/03/09 20:35:19 I think for metrics/ code we actually prefer to ke
bcwhite 2016/03/09 22:57:33 Done.
231 ++iter_;
232 }
233
234 } // namespace base
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698