OLD | NEW |
---|---|
(Empty) | |
1 // Copyright (c) 2015 The Chromium Authors. All rights reserved. | |
2 // Use of this source code is governed by a BSD-style license that can be | |
3 // found in the LICENSE file. | |
4 | |
#include "base/metrics/histogram_persistence.h"

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#include <limits>
#include <string>

#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "base/metrics/histogram.h"
#include "base/metrics/histogram_base.h"
#include "base/metrics/histogram_samples.h"
#include "base/metrics/statistics_recorder.h"
#include "base/synchronization/lock.h"
14 | |
15 namespace base { | |
16 | |
17 namespace { | |
18 | |
// Type identifiers used when storing in persistent memory so objects can be
// recognized during extraction. Each value is the first 4 bytes of the SHA1
// of the type's name, plus a "version number" so that, if the layout of the
// stored object ever changes, records written by older versions are safely
// ignored rather than misinterpreted.
enum : uint32_t {
  kTypeIdHistogram = 0xF1645910 + 1,    // SHA1(Histogram) v1
  kTypeIdRangesArray = 0xBCEA225A + 1,  // SHA1(RangesArray) v1
  kTypeIdCountsArray = 0x53215530 + 1,  // SHA1(CountsArray) v1
};
29 | |
30 // This data must be held in persistent memory in order for processes to | |
31 // locate and use histograms created elsewhere. | |
32 struct PersistentHistogramData { | |
33 int histogram_type; | |
34 int flags; | |
35 int minimum; | |
36 int maximum; | |
37 size_t bucket_count; | |
38 PersistentMemoryAllocator::Reference ranges_ref; | |
39 uint32_t ranges_checksum; | |
40 PersistentMemoryAllocator::Reference counts_ref; | |
41 HistogramSamples::Metadata samples_metadata; | |
42 | |
43 // Space for the histogram name will be added during the actual allocation | |
44 // request. This must be the last field of the structure. A zero-size array | |
45 // or a "flexible" array would be preferred but is not (yet) valid C++. | |
46 char name[1]; | |
47 }; | |
48 | |
49 // The object held here will obviously not be destructed at process exit | |
50 // but that's okay since PersistentMemoryAllocator objects are explicitly | |
51 // forbidden from doing anything essential at exit anyway due to the fact | |
52 // that they depend on data managed elsewhere and which could be destructed | |
53 // first. | |
54 PersistentMemoryAllocator* g_allocator = nullptr; | |
55 | |
56 // Take an array of range boundaries and create a proper BucketRanges object | |
57 // which is returned to the caller. A return of nullptr indicates that the | |
58 // passed boundaries are invalid. | |
59 BucketRanges* CreateRangesFromData(HistogramBase::Sample* ranges_data, | |
60 uint32_t ranges_checksum, | |
61 size_t count) { | |
62 scoped_ptr<BucketRanges> ranges(new BucketRanges(count)); | |
63 DCHECK_EQ(count, ranges->size()); | |
64 for (size_t i = 0; i < count; ++i) { | |
65 if (i > 0 && ranges_data[i] <= ranges_data[i - 1]) | |
66 return nullptr; | |
67 ranges->set_range(i, ranges_data[i]); | |
68 } | |
69 | |
70 ranges->ResetChecksum(); | |
71 if (ranges->checksum() != ranges_checksum) | |
72 return nullptr; | |
73 | |
74 return ranges.release(); | |
75 } | |
76 | |
77 } // namespace | |
78 | |
79 const Feature kPersistentHistogramsFeature{ | |
80 "PersistentHistograms", FEATURE_DISABLED_BY_DEFAULT | |
81 }; | |
82 | |
83 // Get the histogram in which create results are stored. This is copied almost | |
84 // exactly from the STATIC_HISTOGRAM_POINTER_BLOCK macro but with added code | |
85 // to prevent recursion (a likely occurance because the creation of a new | |
86 // histogram can end up calling this.) | |
87 HistogramBase* GetCreateHistogramResultHistogram() { | |
88 static base::subtle::AtomicWord atomic_histogram_pointer = 0; \ | |
Alexei Svitkine (slow)
2016/01/22 16:15:16
Remove \
bcwhite
2016/01/22 17:17:48
Oops. Thought I got all those.
| |
89 HistogramBase* histogram_pointer( | |
90 reinterpret_cast<HistogramBase*>( | |
91 base::subtle::Acquire_Load(&atomic_histogram_pointer))); | |
92 if (!histogram_pointer) { | |
93 // It's possible for multiple threads to make it here in parallel but | |
94 // they'll always return the same result; there is a mutex in the Get. | |
Alexei Svitkine (slow)
2016/01/22 16:15:16
Nit: "same result; there is" -> "same result as th
bcwhite
2016/01/22 17:17:48
Done.
| |
95 // The purpose of the "initialized" variable is just to ensure that | |
96 // the same thread doesn't recurse which is also why it doesn't have | |
97 // to be atomic. | |
98 static bool initialized = false; | |
99 if (!initialized) { | |
100 initialized = true; | |
101 histogram_pointer = LinearHistogram::FactoryGet( | |
102 "CreatePersistentHistogram.Result", | |
Alexei Svitkine (slow)
2016/01/22 16:15:16
Change this to UMA.CreatePersistanteHistogram.Resu
bcwhite
2016/01/22 17:17:48
Done.
| |
103 1, CREATE_HISTOGRAM_MAX, CREATE_HISTOGRAM_MAX + 1, | |
104 HistogramBase::kNoFlags); | |
Alexei Svitkine (slow)
2016/01/22 16:15:16
kUmaTargetedHistogramFlag
bcwhite
2016/01/22 17:17:48
Done.
| |
105 base::subtle::Release_Store( | |
106 &atomic_histogram_pointer, | |
107 reinterpret_cast<base::subtle::AtomicWord>(histogram_pointer)); | |
108 } | |
109 } | |
110 return histogram_pointer; | |
111 } | |
112 | |
113 void SetPersistentHistogramMemoryAllocator( | |
114 PersistentMemoryAllocator* allocator) { | |
115 // Releasing or changing an allocator is extremely dangerous because it | |
116 // likely has histograms stored within it. If the backing memory is also | |
117 // also released, future accesses to those histograms will seg-fault. | |
118 // It's not a fatal CHECK() because tests do this knowing that all | |
119 // such persistent histograms have already been forgotten. | |
120 if (g_allocator) { | |
121 LOG(WARNING) << "Active PersistentMemoryAllocator has been released." | |
122 << " Some existing histogram pointers may be invalid."; | |
123 delete g_allocator; | |
124 } | |
125 g_allocator = allocator; | |
126 } | |
127 | |
128 PersistentMemoryAllocator* GetPersistentHistogramMemoryAllocator() { | |
129 return g_allocator; | |
130 } | |
131 | |
132 PersistentMemoryAllocator* ReleasePersistentHistogramMemoryAllocator() { | |
133 PersistentMemoryAllocator* allocator = g_allocator; | |
134 g_allocator = nullptr; | |
135 return allocator; | |
136 }; | |
137 | |
138 HistogramBase* CreatePersistentHistogram( | |
139 PersistentMemoryAllocator* allocator, | |
140 PersistentHistogramData* histogram_data_ptr) { | |
141 HistogramBase* result_histogram = GetCreateHistogramResultHistogram(); | |
142 | |
143 if (!histogram_data_ptr) { | |
144 if (result_histogram) | |
145 result_histogram->Add(CREATE_HISTOGRAM_INVALID_METADATA_POINTER); | |
146 NOTREACHED(); | |
147 return nullptr; | |
148 } | |
149 | |
150 // Copy the histogram_data to local storage because anything in persistent | |
151 // memory cannot be trusted as it could be changed at any moment by a | |
152 // malicious actor that shares access. The contents of histogram_data are | |
153 // validated below; the local copy is to ensure that the contents cannot | |
154 // be externally changed between validation and use. | |
155 PersistentHistogramData histogram_data = *histogram_data_ptr; | |
156 | |
157 HistogramBase::Sample* ranges_data = | |
158 allocator->GetAsObject<HistogramBase::Sample>(histogram_data.ranges_ref, | |
159 kTypeIdRangesArray); | |
160 if (!ranges_data || histogram_data.bucket_count < 2 || | |
161 histogram_data.bucket_count + 1 > | |
162 std::numeric_limits<size_t>::max() / sizeof(HistogramBase::Sample) || | |
163 allocator->GetAllocSize(histogram_data.ranges_ref) < | |
164 (histogram_data.bucket_count + 1) * sizeof(HistogramBase::Sample)) { | |
165 if (result_histogram) | |
166 result_histogram->Add(CREATE_HISTOGRAM_INVALID_RANGES_ARRAY); | |
167 NOTREACHED(); | |
168 return nullptr; | |
169 } | |
170 // To avoid racy destruction at shutdown, the following will be leaked. | |
171 const BucketRanges* ranges = CreateRangesFromData( | |
172 ranges_data, | |
173 histogram_data.ranges_checksum, | |
174 histogram_data.bucket_count + 1); | |
175 if (!ranges) { | |
176 if (result_histogram) | |
177 result_histogram->Add(CREATE_HISTOGRAM_INVALID_RANGES_ARRAY); | |
178 NOTREACHED(); | |
179 return nullptr; | |
180 } | |
181 ranges = StatisticsRecorder::RegisterOrDeleteDuplicateRanges(ranges); | |
182 | |
183 HistogramBase::AtomicCount* counts_data = | |
184 allocator->GetAsObject<HistogramBase::AtomicCount>( | |
185 histogram_data.counts_ref, kTypeIdCountsArray); | |
186 if (!counts_data || | |
187 allocator->GetAllocSize(histogram_data.counts_ref) < | |
188 histogram_data.bucket_count * sizeof(HistogramBase::AtomicCount)) { | |
189 if (result_histogram) | |
190 result_histogram->Add(CREATE_HISTOGRAM_INVALID_COUNTS_ARRAY); | |
191 NOTREACHED(); | |
192 return nullptr; | |
193 } | |
194 | |
195 std::string name(histogram_data_ptr->name); | |
196 HistogramBase* histogram = nullptr; | |
197 switch (histogram_data.histogram_type) { | |
198 case HISTOGRAM: | |
199 histogram = Histogram::PersistentGet( | |
200 name, | |
201 histogram_data.minimum, | |
202 histogram_data.maximum, | |
203 ranges, | |
204 counts_data, | |
205 histogram_data.bucket_count, | |
206 &histogram_data_ptr->samples_metadata); | |
207 break; | |
208 case LINEAR_HISTOGRAM: | |
209 histogram = LinearHistogram::PersistentGet( | |
210 name, | |
211 histogram_data.minimum, | |
212 histogram_data.maximum, | |
213 ranges, | |
214 counts_data, | |
215 histogram_data.bucket_count, | |
216 &histogram_data_ptr->samples_metadata); | |
217 break; | |
218 case BOOLEAN_HISTOGRAM: | |
219 histogram = BooleanHistogram::PersistentGet( | |
220 name, | |
221 ranges, | |
222 counts_data, | |
223 &histogram_data_ptr->samples_metadata); | |
224 break; | |
225 case CUSTOM_HISTOGRAM: | |
226 histogram = CustomHistogram::PersistentGet( | |
227 name, | |
228 ranges, | |
229 counts_data, | |
230 histogram_data.bucket_count, | |
231 &histogram_data_ptr->samples_metadata); | |
232 break; | |
233 } | |
234 | |
235 if (histogram) { | |
236 DCHECK_EQ(histogram_data.histogram_type, histogram->GetHistogramType()); | |
237 histogram->SetFlags(histogram_data.flags); | |
238 } | |
239 | |
240 if (result_histogram) | |
241 result_histogram->Add(CREATE_HISTOGRAM_SUCCESS); | |
242 return histogram; | |
243 } | |
244 | |
245 HistogramBase* GetPersistentHistogram( | |
246 PersistentMemoryAllocator* allocator, | |
247 int32_t ref) { | |
248 // Unfortunately, the above "pickle" methods cannot be used as part of the | |
249 // persistance because the deserialization methods always create local | |
250 // count data (these must referenced the persistent counts) and always add | |
251 // it to the local list of known histograms (these may be simple references | |
252 // to histograms in other processes). | |
253 PersistentHistogramData* histogram_data = | |
254 allocator->GetAsObject<PersistentHistogramData>(ref, kTypeIdHistogram); | |
255 size_t length = allocator->GetAllocSize(ref); | |
256 if (!histogram_data || | |
257 reinterpret_cast<char*>(histogram_data)[length - 1] != '\0') { | |
258 HistogramBase* result_histogram = GetCreateHistogramResultHistogram(); | |
259 if (result_histogram) | |
260 result_histogram->Add(CREATE_HISTOGRAM_INVALID_METADATA); | |
Alexei Svitkine (slow)
2016/01/22 16:15:16
Nit: Instead of having this pattern with the if st
bcwhite
2016/01/22 17:17:48
Done.
| |
261 NOTREACHED(); | |
262 return nullptr; | |
263 } | |
264 return CreatePersistentHistogram(allocator, histogram_data); | |
265 } | |
266 | |
267 HistogramBase* GetNextPersistentHistogram( | |
268 PersistentMemoryAllocator* allocator, | |
269 PersistentMemoryAllocator::Iterator* iter) { | |
270 PersistentMemoryAllocator::Reference ref; | |
271 uint32_t type_id; | |
272 while ((ref = allocator->GetNextIterable(iter, &type_id)) != 0) { | |
273 if (type_id == kTypeIdHistogram) | |
274 return GetPersistentHistogram(allocator, ref); | |
275 } | |
276 return nullptr; | |
277 } | |
278 | |
279 void FinalizePersistentHistogram(PersistentMemoryAllocator::Reference ref, | |
280 bool registered) { | |
281 // If the created persistent histogram was registered then it needs to | |
282 // be marked as "iterable" in order to be found by other processes. | |
283 if (registered) | |
284 GetPersistentHistogramMemoryAllocator()->MakeIterable(ref); | |
285 // If it wasn't registered then a race condition must have caused | |
286 // two to be created. The allocator does not support releasing the | |
287 // acquired memory so just change the type to be empty. | |
288 else | |
289 GetPersistentHistogramMemoryAllocator()->SetType(ref, 0); | |
290 } | |
291 | |
292 HistogramBase* AllocatePersistentHistogram( | |
293 PersistentMemoryAllocator* allocator, | |
294 HistogramType histogram_type, | |
295 const std::string& name, | |
296 int minimum, | |
297 int maximum, | |
298 const BucketRanges* bucket_ranges, | |
299 int32_t flags, | |
300 PersistentMemoryAllocator::Reference* ref_ptr) { | |
301 if (!allocator) | |
302 return nullptr; | |
303 | |
304 size_t bucket_count = bucket_ranges->bucket_count(); | |
305 // An overflow such as this, perhaps as the result of a milicious actor, | |
306 // could lead to writing beyond the allocation boundary and into other | |
307 // memory. Just fail the allocation and let the caller deal with it. | |
308 if (bucket_count > std::numeric_limits<int32_t>::max() / | |
309 sizeof(HistogramBase::AtomicCount)) { | |
310 NOTREACHED(); | |
311 return nullptr; | |
312 } | |
313 size_t counts_bytes = bucket_count * sizeof(HistogramBase::AtomicCount); | |
314 size_t ranges_bytes = (bucket_count + 1) * sizeof(HistogramBase::Sample); | |
315 PersistentMemoryAllocator::Reference ranges_ref = | |
316 allocator->Allocate(ranges_bytes, kTypeIdRangesArray); | |
317 PersistentMemoryAllocator::Reference counts_ref = | |
318 allocator->Allocate(counts_bytes, kTypeIdCountsArray); | |
319 PersistentMemoryAllocator::Reference histogram_ref = | |
320 allocator->Allocate(offsetof(PersistentHistogramData, name) + | |
321 name.length() + 1, kTypeIdHistogram); | |
322 HistogramBase::Sample* ranges_data = | |
323 allocator->GetAsObject<HistogramBase::Sample>(ranges_ref, | |
324 kTypeIdRangesArray); | |
325 PersistentHistogramData* histogram_data = | |
326 allocator->GetAsObject<PersistentHistogramData>(histogram_ref, | |
327 kTypeIdHistogram); | |
328 | |
329 // Only continue here if all allocations were successful. If they weren't | |
330 // there is no way to free the space but that's not really a problem since | |
331 // the allocations only fail because the space is full and so any future | |
332 // attempts will also fail. | |
333 if (counts_ref && ranges_data && histogram_data) { | |
334 strcpy(histogram_data->name, name.c_str()); | |
335 for (size_t i = 0; i < bucket_ranges->size(); ++i) | |
336 ranges_data[i] = bucket_ranges->range(i); | |
337 | |
338 histogram_data->histogram_type = histogram_type; | |
339 histogram_data->flags = flags; | |
340 histogram_data->minimum = minimum; | |
341 histogram_data->maximum = maximum; | |
342 histogram_data->bucket_count = bucket_count; | |
343 histogram_data->ranges_ref = ranges_ref; | |
344 histogram_data->ranges_checksum = bucket_ranges->checksum(); | |
345 histogram_data->counts_ref = counts_ref; | |
346 | |
347 // Create the histogram using resources in persistent memory. This ends up | |
348 // resolving the "ref" values stored in histogram_data instad of just | |
349 // using what is already known above but avoids duplicating the switch | |
350 // statement here and serves as a double-check that everything is | |
351 // correct before commiting the new histogram to persistent space. | |
352 HistogramBase* histogram = | |
353 CreatePersistentHistogram(allocator, histogram_data); | |
354 DCHECK(histogram); | |
355 if (ref_ptr != nullptr) | |
356 *ref_ptr = histogram_ref; | |
357 return histogram; | |
358 } | |
359 | |
360 HistogramBase* result_histogram = GetCreateHistogramResultHistogram(); | |
361 if (result_histogram) { | |
362 if (allocator->IsCorrupt()) { | |
363 result_histogram->Add(CREATE_HISTOGRAM_ALLOCATOR_CORRUPT); | |
364 } else if (allocator->IsFull()) { | |
365 result_histogram->Add(CREATE_HISTOGRAM_ALLOCATOR_FULL); | |
366 } else { | |
367 result_histogram->Add(CREATE_HISTOGRAM_ALLOCATOR_ERROR); | |
368 } | |
369 } | |
370 | |
371 return nullptr; | |
372 } | |
373 | |
374 void ImportPersistentHistograms() { | |
375 // Each call resumes from where it last left off so need persistant iterator. | |
376 // The lock protects against concurrent access to the iterator and is created | |
377 // dynamically so as to not require destruction during program exit. | |
378 static PersistentMemoryAllocator::Iterator iter; | |
379 static base::Lock* lock = new base::Lock(); | |
380 | |
381 if (g_allocator) { | |
382 base::AutoLock auto_lock(*lock); | |
383 if (iter.is_clear()) | |
384 g_allocator->CreateIterator(&iter); | |
385 | |
386 for (;;) { | |
387 HistogramBase* histogram = GetNextPersistentHistogram(g_allocator, &iter); | |
388 if (!histogram) | |
389 break; | |
390 StatisticsRecorder::RegisterOrDeleteDuplicate(histogram); | |
391 } | |
392 } | |
393 } | |
394 | |
395 } // namespace base | |
OLD | NEW |