OLD | NEW |
1 // Copyright 2016 The Chromium Authors. All rights reserved. | 1 // Copyright 2016 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "base/metrics/persistent_sample_map.h" | 5 #include "base/metrics/persistent_sample_map.h" |
6 | 6 |
7 #include "base/logging.h" | 7 #include "base/logging.h" |
8 #include "base/memory/ptr_util.h" | 8 #include "base/memory/ptr_util.h" |
| 9 #include "base/metrics/persistent_histogram_allocator.h" |
9 #include "base/stl_util.h" | 10 #include "base/stl_util.h" |
10 | 11 |
11 namespace base { | 12 namespace base { |
12 | 13 |
13 typedef HistogramBase::Count Count; | 14 typedef HistogramBase::Count Count; |
14 typedef HistogramBase::Sample Sample; | 15 typedef HistogramBase::Sample Sample; |
15 | 16 |
16 namespace { | 17 namespace { |
17 | 18 |
18 // An iterator for going through a PersistentSampleMap. The logic here is | 19 // An iterator for going through a PersistentSampleMap. The logic here is |
(...skipping 67 matching lines...) |
86 Count count; // The count associated with the above value. | 87 Count count; // The count associated with the above value. |
87 }; | 88 }; |
88 | 89 |
89 // The type-id used to identify sample records inside an allocator. | 90 // The type-id used to identify sample records inside an allocator. |
90 const uint32_t kTypeIdSampleRecord = 0x8FE6A69F + 1; // SHA1(SampleRecord) v1 | 91 const uint32_t kTypeIdSampleRecord = 0x8FE6A69F + 1; // SHA1(SampleRecord) v1 |
91 | 92 |
92 } // namespace | 93 } // namespace |
93 | 94 |
94 PersistentSampleMap::PersistentSampleMap( | 95 PersistentSampleMap::PersistentSampleMap( |
95 uint64_t id, | 96 uint64_t id, |
96 PersistentMemoryAllocator* allocator, | 97 PersistentHistogramAllocator* allocator, |
97 Metadata* meta) | 98 Metadata* meta) |
98 : HistogramSamples(id, meta), | 99 : PersistentSampleMap(id, allocator->UseSampleMapRecords(id, this), meta) {} |
99 allocator_(allocator), | 100 |
100 sample_iter_(allocator) { | 101 PersistentSampleMap::PersistentSampleMap( |
101 // Load all existing samples during construction. It's no worse to do it | 102 uint64_t id, |
102 // here than at some point in the future and could be better if construction | 103 PersistentSparseHistogramDataManager* manager, |
103 // takes place on some background thread. New samples could be created at | 104 Metadata* meta) |
104 // any time by parallel threads; if so, they'll get loaded when needed. | 105 : PersistentSampleMap(id, manager->UseSampleMapRecords(id, this), meta) {} |
105 ImportSamples(kAllSamples); | 106 |
| 107 PersistentSampleMap::PersistentSampleMap( |
| 108 uint64_t id, |
| 109 PersistentSampleMapRecords* records, |
| 110 Metadata* meta) |
| 111 : HistogramSamples(id, meta), records_(records) {} |
| 112 |
| 113 PersistentSampleMap::~PersistentSampleMap() { |
| 114 records_->Release(this); |
106 } | 115 } |
107 | 116 |
108 PersistentSampleMap::~PersistentSampleMap() {} | |
109 | |
110 void PersistentSampleMap::Accumulate(Sample value, Count count) { | 117 void PersistentSampleMap::Accumulate(Sample value, Count count) { |
111 *GetOrCreateSampleCountStorage(value) += count; | 118 *GetOrCreateSampleCountStorage(value) += count; |
112 IncreaseSum(static_cast<int64_t>(count) * value); | 119 IncreaseSum(static_cast<int64_t>(count) * value); |
113 IncreaseRedundantCount(count); | 120 IncreaseRedundantCount(count); |
114 } | 121 } |
115 | 122 |
116 Count PersistentSampleMap::GetCount(Sample value) const { | 123 Count PersistentSampleMap::GetCount(Sample value) const { |
117 // Have to override "const" to make sure all samples have been loaded before | 124 // Have to override "const" to make sure all samples have been loaded before |
118 // being able to know what value to return. | 125 // being able to know what value to return. |
119 Count* count_pointer = | 126 Count* count_pointer = |
(...skipping 13 matching lines...) |
133 return count; | 140 return count; |
134 } | 141 } |
135 | 142 |
136 std::unique_ptr<SampleCountIterator> PersistentSampleMap::Iterator() const { | 143 std::unique_ptr<SampleCountIterator> PersistentSampleMap::Iterator() const { |
137 // Have to override "const" in order to make sure all samples have been | 144 // Have to override "const" in order to make sure all samples have been |
138 // loaded before trying to iterate over the map. | 145 // loaded before trying to iterate over the map. |
139 const_cast<PersistentSampleMap*>(this)->ImportSamples(kAllSamples); | 146 const_cast<PersistentSampleMap*>(this)->ImportSamples(kAllSamples); |
140 return WrapUnique(new PersistentSampleMapIterator(sample_counts_)); | 147 return WrapUnique(new PersistentSampleMapIterator(sample_counts_)); |
141 } | 148 } |
142 | 149 |
| 150 // static |
| 151 PersistentMemoryAllocator::Reference |
| 152 PersistentSampleMap::GetNextPersistentRecord( |
| 153 PersistentMemoryAllocator::Iterator& iterator, |
| 154 uint64_t* sample_map_id) { |
| 155 PersistentMemoryAllocator::Reference ref = |
| 156 iterator.GetNextOfType(kTypeIdSampleRecord); |
| 157 const SampleRecord* record = |
| 158 iterator.GetAsObject<SampleRecord>(ref, kTypeIdSampleRecord); |
| 159 if (!record) |
| 160 return 0; |
| 161 |
| 162 *sample_map_id = record->id; |
| 163 return ref; |
| 164 } |
| 165 |
| 166 // static |
| 167 PersistentMemoryAllocator::Reference |
| 168 PersistentSampleMap::CreatePersistentRecord( |
| 169 PersistentMemoryAllocator* allocator, |
| 170 uint64_t sample_map_id, |
| 171 Sample value) { |
| 172 PersistentMemoryAllocator::Reference ref = |
| 173 allocator->Allocate(sizeof(SampleRecord), kTypeIdSampleRecord); |
| 174 SampleRecord* record = |
| 175 allocator->GetAsObject<SampleRecord>(ref, kTypeIdSampleRecord); |
| 176 |
| 177 if (!record) { |
| 178 NOTREACHED() << "full=" << allocator->IsFull() |
| 179 << ", corrupt=" << allocator->IsCorrupt(); |
| 180 return 0; |
| 181 } |
| 182 |
| 183 record->id = sample_map_id; |
| 184 record->value = value; |
| 185 record->count = 0; |
| 186 allocator->MakeIterable(ref); |
| 187 return ref; |
| 188 } |
| 189 |
143 bool PersistentSampleMap::AddSubtractImpl(SampleCountIterator* iter, | 190 bool PersistentSampleMap::AddSubtractImpl(SampleCountIterator* iter, |
144 Operator op) { | 191 Operator op) { |
145 Sample min; | 192 Sample min; |
146 Sample max; | 193 Sample max; |
147 Count count; | 194 Count count; |
148 for (; !iter->Done(); iter->Next()) { | 195 for (; !iter->Done(); iter->Next()) { |
149 iter->Get(&min, &max, &count); | 196 iter->Get(&min, &max, &count); |
150 if (min + 1 != max) | 197 if (min + 1 != max) |
151 return false; // SparseHistogram only supports bucket with size 1. | 198 return false; // SparseHistogram only supports bucket with size 1. |
152 | 199 |
(...skipping 15 matching lines...) |
168 return ImportSamples(value); | 215 return ImportSamples(value); |
169 } | 216 } |
170 | 217 |
171 Count* PersistentSampleMap::GetOrCreateSampleCountStorage(Sample value) { | 218 Count* PersistentSampleMap::GetOrCreateSampleCountStorage(Sample value) { |
172 // Get any existing count storage. | 219 // Get any existing count storage. |
173 Count* count_pointer = GetSampleCountStorage(value); | 220 Count* count_pointer = GetSampleCountStorage(value); |
174 if (count_pointer) | 221 if (count_pointer) |
175 return count_pointer; | 222 return count_pointer; |
176 | 223 |
177 // Create a new record in persistent memory for the value. | 224 // Create a new record in persistent memory for the value. |
178 PersistentMemoryAllocator::Reference ref = | 225 PersistentMemoryAllocator::Reference ref = records_->CreateNew(value); |
179 allocator_->Allocate(sizeof(SampleRecord), kTypeIdSampleRecord); | 226 if (!ref) { |
180 SampleRecord* record = | 227 // If a new record could not be created then the underlying allocator is |
181 allocator_->GetAsObject<SampleRecord>(ref, kTypeIdSampleRecord); | 228 // full or corrupt. Instead, allocate the counter from the heap. This |
182 if (!record) { | 229 // sample will not be persistent, will not be shared, and will leak... |
183 // If the allocator was unable to create a record then it is full or | 230 // but it's better than crashing. |
184 // corrupt. Instead, allocate the counter from the heap. This sample will | |
185 // not be persistent, will not be shared, and will leak but it's better | |
186 // than crashing. | |
187 NOTREACHED() << "full=" << allocator_->IsFull() | |
188 << ", corrupt=" << allocator_->IsCorrupt(); | |
189 count_pointer = new Count(0); | 231 count_pointer = new Count(0); |
190 sample_counts_[value] = count_pointer; | 232 sample_counts_[value] = count_pointer; |
191 return count_pointer; | 233 return count_pointer; |
192 } | 234 } |
193 record->id = id(); | |
194 record->value = value; | |
195 record->count = 0; // Should already be zero but don't trust other processes. | |
196 allocator_->MakeIterable(ref); | |
197 | 235 |
198 // A race condition between two independent processes (i.e. two independent | 236 // A race condition between two independent processes (i.e. two independent |
199 // histogram objects sharing the same sample data) could cause two of the | 237 // histogram objects sharing the same sample data) could cause two of the |
200 // above records to be created. The allocator, however, forces a strict | 238 // above records to be created. The allocator, however, forces a strict |
201 // ordering on iterable objects so use the import method to actually add the | 239 // ordering on iterable objects so use the import method to actually add the |
202 // just-created record. This ensures that all PersistentSampleMap objects | 240 // just-created record. This ensures that all PersistentSampleMap objects |
203 // will always use the same record, whichever was first made iterable. | 241 // will always use the same record, whichever was first made iterable. |
204 // Thread-safety within a process where multiple threads use the same | 242 // Thread-safety within a process where multiple threads use the same |
205 // histogram object is delegated to the controlling histogram object which, | 243 // histogram object is delegated to the controlling histogram object which, |
206 // for sparse histograms, is a lock object. | 244 // for sparse histograms, is a lock object. |
207 count_pointer = ImportSamples(value); | 245 count_pointer = ImportSamples(value); |
208 DCHECK(count_pointer); | 246 DCHECK(count_pointer); |
209 return count_pointer; | 247 return count_pointer; |
210 } | 248 } |
211 | 249 |
212 Count* PersistentSampleMap::ImportSamples(Sample until_value) { | 250 Count* PersistentSampleMap::ImportSamples(Sample until_value) { |
213 // TODO(bcwhite): This import operates in O(V+N) total time per sparse | |
214 // histogram where V is the number of values for this object and N is | |
215 // the number of other iterable objects in the allocator. This becomes | |
216 // O(S*(SV+N)) or O(S^2*V + SN) overall where S is the number of sparse | |
217 // histograms. | |
218 // | |
219 // This is actually okay when histograms are expected to exist for the | |
220 // lifetime of the program, spreading the cost out, and S and V are | |
221 // relatively small, as is the current case. | |
222 // | |
223 // However, it is not so good for objects that are created, destroyed, and | |
224 // recreated on a periodic basis, such as when making a snapshot of | |
225 // sparse histograms owned by another, ongoing process. In that case, the | |
226 // entire cost is compressed into a single sequential operation... on the | |
227 // UI thread no less. | |
228 // | |
229 // This will be addressed in a future CL. | |
230 | |
231 PersistentMemoryAllocator::Reference ref; | 251 PersistentMemoryAllocator::Reference ref; |
232 while ((ref = sample_iter_.GetNextOfType(kTypeIdSampleRecord)) != 0) { | 252 while ((ref = records_->GetNext()) != 0) { |
233 SampleRecord* record = | 253 SampleRecord* record = |
234 allocator_->GetAsObject<SampleRecord>(ref, kTypeIdSampleRecord); | 254 records_->GetAsObject<SampleRecord>(ref, kTypeIdSampleRecord); |
235 if (!record) | 255 if (!record) |
236 continue; | 256 continue; |
237 | 257 |
238 // A sample record has been found but may not be for this histogram. | 258 DCHECK_EQ(id(), record->id); |
239 if (record->id != id()) | |
240 continue; | |
241 | 259 |
242 // Check if the record's value is already known. | 260 // Check if the record's value is already known. |
243 if (!ContainsKey(sample_counts_, record->value)) { | 261 if (!ContainsKey(sample_counts_, record->value)) { |
244 // No: Add it to map of known values if the value is valid. | 262 // No: Add it to map of known values if the value is valid. |
245 if (record->value >= 0) | 263 if (record->value >= 0) |
246 sample_counts_[record->value] = &record->count; | 264 sample_counts_[record->value] = &record->count; |
247 } else { | 265 } else { |
248 // Yes: Ignore it; it's a duplicate caused by a race condition -- see | 266 // Yes: Ignore it; it's a duplicate caused by a race condition -- see |
249 // code & comment in GetOrCreateSampleCountStorage() for details. | 267 // code & comment in GetOrCreateSampleCountStorage() for details. |
250 // Check that nothing ever operated on the duplicate record. | 268 // Check that nothing ever operated on the duplicate record. |
251 DCHECK_EQ(0, record->count); | 269 DCHECK_EQ(0, record->count); |
252 } | 270 } |
253 | 271 |
254 // Stop if it's the value being searched for. | 272 // Stop if it's the value being searched for. |
255 if (record->value == until_value) | 273 if (record->value == until_value) |
256 return &record->count; | 274 return &record->count; |
257 } | 275 } |
258 | 276 |
259 return nullptr; | 277 return nullptr; |
260 } | 278 } |
261 | 279 |
262 } // namespace base | 280 } // namespace base |
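For readers less familiar with the record-routing scheme this CL moves to, below is a minimal, self-contained C++ sketch of the two ideas described in the comments in the diff: routing sample records out of one shared, strictly ordered store to the histogram whose id they carry (the role PersistentSparseHistogramDataManager / PersistentSampleMapRecords appear to play here), and the first-record-wins rule that resolves the cross-process race noted in GetOrCreateSampleCountStorage(). All names in the sketch (SharedRecord, RecordRouter, FakeSampleMap) are hypothetical stand-ins; this is an illustration of the technique under those assumptions, not the base/ implementation.

// Hypothetical sketch, not Chromium code: illustrates per-histogram record
// routing plus "first record in iteration order wins" duplicate resolution.
#include <cstddef>
#include <cstdint>
#include <deque>
#include <iostream>
#include <map>

struct SharedRecord {
  uint64_t id;    // Owning sparse-histogram id.
  int32_t value;  // Sample value.
  int32_t count;  // Times the value was recorded.
};

// Append-only store with a stable iteration order. std::deque never moves
// existing elements, mimicking records that stay put in persistent memory.
std::deque<SharedRecord> g_store;

// Routes records from the shared store to per-histogram queues, so each
// histogram walks only its own records instead of skipping everyone else's.
class RecordRouter {
 public:
  // Returns the next record belonging to |id|, scanning forward through the
  // shared store and queueing anything that belongs to other histograms.
  SharedRecord* GetNext(uint64_t id) {
    std::deque<SharedRecord*>& mine = pending_[id];
    while (mine.empty() && next_index_ < g_store.size()) {
      SharedRecord* record = &g_store[next_index_++];
      pending_[record->id].push_back(record);
    }
    if (mine.empty())
      return nullptr;
    SharedRecord* record = mine.front();
    mine.pop_front();
    return record;
  }

 private:
  size_t next_index_ = 0;                                   // Scan position.
  std::map<uint64_t, std::deque<SharedRecord*>> pending_;   // Per-id queues.
};

// The part of PersistentSampleMap::ImportSamples() being illustrated: only
// the first record seen for a given value becomes the authoritative counter.
class FakeSampleMap {
 public:
  FakeSampleMap(uint64_t id, RecordRouter* router)
      : id_(id), router_(router) {}

  int32_t* GetOrCreateCount(int32_t value) {
    // Fast path: the value was imported before.
    auto it = counts_.find(value);
    if (it != counts_.end())
      return it->second;
    // Import this histogram's records, first record per value wins, stopping
    // once the requested value turns up.
    while (SharedRecord* record = router_->GetNext(id_)) {
      counts_.emplace(record->value, &record->count);
      if (record->value == value)
        return counts_[value];
    }
    // Not found anywhere: create a shared record, then import it back so
    // every reader agrees on which record is authoritative.
    g_store.push_back({id_, value, 0});
    return GetOrCreateCount(value);
  }

 private:
  uint64_t id_;
  RecordRouter* router_;
  std::map<int32_t, int32_t*> counts_;
};

int main() {
  RecordRouter router;
  FakeSampleMap sample_map(42, &router);

  // Simulate a cross-process race: two records for value 7 already exist in
  // the shared store, plus one record owned by some other histogram.
  g_store.push_back({42, 7, 0});  // First made iterable: this one "wins".
  g_store.push_back({42, 7, 0});  // Duplicate from the race: ignored.
  g_store.push_back({99, 5, 0});  // Routed to histogram 99, never seen here.

  *sample_map.GetOrCreateCount(7) += 3;
  std::cout << "winner=" << g_store[0].count       // 3
            << " duplicate=" << g_store[1].count   // 0
            << std::endl;
  return 0;
}

In the real code, the iteration-order guarantee comes from PersistentMemoryAllocator::MakeIterable(), and ImportSamples() likewise stops early once the requested value is found; the duplicate record is expected to remain untouched, which the new code checks with DCHECK_EQ(0, record->count).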