// Copyright (c) 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/metrics/histogram_persistence.h"

#include <stddef.h>
#include <string.h>

#include <limits>

#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "base/metrics/histogram.h"
#include "base/metrics/histogram_base.h"
#include "base/metrics/histogram_samples.h"
#include "base/metrics/statistics_recorder.h"
#include "base/synchronization/lock.h"

namespace base {

namespace {

// Enumerate possible creation results for reporting.
enum CreateHistogramResultType {
  // Everything was fine.
  CREATE_HISTOGRAM_SUCCESS = 0,

  // Pointer to metadata was not valid.
  CREATE_HISTOGRAM_INVALID_METADATA_POINTER,

  // Histogram metadata was not valid.
  CREATE_HISTOGRAM_INVALID_METADATA,

  // Ranges information was not valid.
  CREATE_HISTOGRAM_INVALID_RANGES_ARRAY,

  // Counts information was not valid.
  CREATE_HISTOGRAM_INVALID_COUNTS_ARRAY,

  // Could not allocate histogram memory due to corruption.
  CREATE_HISTOGRAM_ALLOCATOR_CORRUPT,

  // Could not allocate histogram memory due to lack of space.
  CREATE_HISTOGRAM_ALLOCATOR_FULL,

  // Could not allocate histogram memory due to unknown error.
  CREATE_HISTOGRAM_ALLOCATOR_ERROR,

  // Histogram was of unknown type.
  CREATE_HISTOGRAM_UNKNOWN_TYPE,

  // Instance has detected a corrupt allocator (recorded only once).
  CREATE_HISTOGRAM_ALLOCATOR_NEWLY_CORRUPT,

  // Always keep this at the end.
  CREATE_HISTOGRAM_MAX
};

// Name of histogram for storing results of local operations.
const char kResultHistogram[] = "UMA.CreatePersistentHistogram.Result";

// Type identifiers used when storing in persistent memory so they can be
// identified during extraction; the first 4 bytes of the SHA1 of the name
// are used as a unique integer. A "version number" is added to the base
// so that, if the structure of that object changes, stored older versions
// will be safely ignored.
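// For example (illustrative only): a further layout change to the persisted
// histogram record would bump kTypeIdHistogram below to 0xF1645910 + 3, and
// records written with the older ID would simply be ignored on extraction.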
enum : uint32_t {
  kTypeIdHistogram = 0xF1645910 + 2,  // SHA1(Histogram) v2
  kTypeIdRangesArray = 0xBCEA225A + 1,  // SHA1(RangesArray) v1
  kTypeIdCountsArray = 0x53215530 + 1,  // SHA1(CountsArray) v1
};

// This data must be held in persistent memory in order for processes to
// locate and use histograms created elsewhere. All elements must be of a
// fixed width to ensure 32/64-bit interoperability.
struct PersistentHistogramData {
  int32_t histogram_type;
  int32_t flags;
  int32_t minimum;
  int32_t maximum;
  uint32_t bucket_count;
  PersistentMemoryAllocator::Reference ranges_ref;
  uint32_t ranges_checksum;
  PersistentMemoryAllocator::Reference counts_ref;
  HistogramSamples::Metadata samples_metadata;
  HistogramSamples::Metadata logged_metadata;

  // Space for the histogram name will be added during the actual allocation
  // request. This must be the last field of the structure. A zero-size array
  // or a "flexible" array would be preferred but is not (yet) valid C++.
  char name[1];
};
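
// Note (illustrative): because |name| is appended in place, the allocation
// size for one of these records is offsetof(PersistentHistogramData, name)
// plus the name length plus one for the terminating NUL, as computed in
// AllocatePersistentHistogram() below.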

// The object held here will obviously not be destructed at process exit
// but that's okay since PersistentMemoryAllocator objects are explicitly
// forbidden from doing anything essential at exit anyway: they depend on
// data managed elsewhere which could be destructed first.
PersistentMemoryAllocator* g_allocator = nullptr;

// Take an array of range boundaries and create a proper BucketRanges object
// which is returned to the caller. A return of nullptr indicates that the
// passed boundaries are invalid.
BucketRanges* CreateRangesFromData(HistogramBase::Sample* ranges_data,
                                   uint32_t ranges_checksum,
                                   size_t count) {
  scoped_ptr<BucketRanges> ranges(new BucketRanges(count));
  DCHECK_EQ(count, ranges->size());
  for (size_t i = 0; i < count; ++i) {
    if (i > 0 && ranges_data[i] <= ranges_data[i - 1])
      return nullptr;
    ranges->set_range(i, ranges_data[i]);
  }

  ranges->ResetChecksum();
  if (ranges->checksum() != ranges_checksum)
    return nullptr;

  return ranges.release();
}

// Calculate the number of bytes required to store all of a histogram's
// "counts". This will return zero (0) if |bucket_count| is not valid.
size_t CalculateRequiredCountsBytes(size_t bucket_count) {
  // 2 because each "sample count" also requires a backup "logged count"
  // used for calculating the delta during snapshot operations.
  const unsigned kBytesPerBucket = 2 * sizeof(HistogramBase::AtomicCount);

  // If the |bucket_count| is such that it would overflow the return type,
  // perhaps as the result of a malicious actor, then return zero to
  // indicate the problem to the caller.
  if (bucket_count > std::numeric_limits<uint32_t>::max() / kBytesPerBucket)
    return 0;

  return bucket_count * kBytesPerBucket;
}

}  // namespace

const Feature kPersistentHistogramsFeature{
  "PersistentHistograms", FEATURE_DISABLED_BY_DEFAULT
};

// Get the histogram in which create results are stored. This is copied almost
// exactly from the STATIC_HISTOGRAM_POINTER_BLOCK macro but with added code
// to prevent recursion (a likely occurrence because the creation of a new
// histogram can end up calling this).
HistogramBase* GetCreateHistogramResultHistogram() {
  static base::subtle::AtomicWord atomic_histogram_pointer = 0;
  HistogramBase* histogram_pointer(
      reinterpret_cast<HistogramBase*>(
          base::subtle::Acquire_Load(&atomic_histogram_pointer)));
  if (!histogram_pointer) {
    // It's possible for multiple threads to make it here in parallel but
    // they'll always return the same result as there is a mutex in the Get.
    // The purpose of the "initialized" variable is just to ensure that
    // the same thread doesn't recurse which is also why it doesn't have
    // to be atomic.
    static bool initialized = false;
    if (!initialized) {
      initialized = true;
      if (g_allocator) {
        DLOG(WARNING) << "Creating the results-histogram inside persistent"
                      << " memory can cause future allocations to crash if"
                      << " that memory is ever released (for testing).";
      }

      histogram_pointer = LinearHistogram::FactoryGet(
          kResultHistogram, 1, CREATE_HISTOGRAM_MAX, CREATE_HISTOGRAM_MAX + 1,
          HistogramBase::kUmaTargetedHistogramFlag);
      base::subtle::Release_Store(
          &atomic_histogram_pointer,
          reinterpret_cast<base::subtle::AtomicWord>(histogram_pointer));
    }
  }
  return histogram_pointer;
}

// Record the result of a histogram creation.
void RecordCreateHistogramResult(CreateHistogramResultType result) {
  HistogramBase* result_histogram = GetCreateHistogramResultHistogram();
  if (result_histogram)
    result_histogram->Add(result);
}

void SetPersistentHistogramMemoryAllocator(
    PersistentMemoryAllocator* allocator) {
  // Releasing or changing an allocator is extremely dangerous because it
  // likely has histograms stored within it. If the backing memory is also
  // released, future accesses to those histograms will seg-fault.
  CHECK(!g_allocator);
  g_allocator = allocator;
}

PersistentMemoryAllocator* GetPersistentHistogramMemoryAllocator() {
  return g_allocator;
}

PersistentMemoryAllocator*
ReleasePersistentHistogramMemoryAllocatorForTesting() {
  PersistentMemoryAllocator* allocator = g_allocator;
  if (!allocator)
    return nullptr;

  // Before releasing the memory, it's necessary to have the Statistics-
  // Recorder forget about the histograms contained therein; otherwise,
  // some operations will try to access them and the released memory.
  PersistentMemoryAllocator::Iterator iter;
  PersistentMemoryAllocator::Reference ref;
  uint32_t type_id;
  allocator->CreateIterator(&iter);
  while ((ref = allocator->GetNextIterable(&iter, &type_id)) != 0) {
    if (type_id == kTypeIdHistogram) {
      PersistentHistogramData* histogram_data =
          allocator->GetAsObject<PersistentHistogramData>(
              ref, kTypeIdHistogram);
      DCHECK(histogram_data);
      StatisticsRecorder::ForgetHistogramForTesting(histogram_data->name);

      // If a test breaks here then a memory region containing a histogram
      // actively used by this code is being released back to the test.
      // If that memory segment were to be deleted, future calls to create
      // persistent histograms would crash. To avoid this, have the test call
      // the method GetCreateHistogramResultHistogram() *before* setting the
      // (temporary) memory allocator via
      // SetPersistentHistogramMemoryAllocator() so that the histogram is
      // instead allocated from the process heap.
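      //
      // A correctly ordered test might therefore look like (hypothetical
      // sketch, not real test code):
      //   GetCreateHistogramResultHistogram();  // Allocate from the heap.
      //   SetPersistentHistogramMemoryAllocator(&temporary_allocator);
      //   ... create and exercise persistent histograms ...
      //   ReleasePersistentHistogramMemoryAllocatorForTesting();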
      DCHECK_NE(kResultHistogram, histogram_data->name);
    }
  }

  g_allocator = nullptr;
  return allocator;
}

HistogramBase* CreatePersistentHistogram(
    PersistentMemoryAllocator* allocator,
    PersistentHistogramData* histogram_data_ptr) {
  if (!histogram_data_ptr) {
    RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_METADATA_POINTER);
    NOTREACHED();
    return nullptr;
  }

  // Copy the histogram_data to local storage because anything in persistent
  // memory cannot be trusted as it could be changed at any moment by a
  // malicious actor that shares access. The contents of histogram_data are
  // validated below; the local copy is to ensure that the contents cannot
  // be externally changed between validation and use.
  PersistentHistogramData histogram_data = *histogram_data_ptr;

  HistogramBase::Sample* ranges_data =
      allocator->GetAsObject<HistogramBase::Sample>(histogram_data.ranges_ref,
                                                    kTypeIdRangesArray);
  if (!ranges_data || histogram_data.bucket_count < 2 ||
      histogram_data.bucket_count + 1 >
          std::numeric_limits<uint32_t>::max() /
              sizeof(HistogramBase::Sample) ||
      allocator->GetAllocSize(histogram_data.ranges_ref) <
          (histogram_data.bucket_count + 1) * sizeof(HistogramBase::Sample)) {
    RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_RANGES_ARRAY);
    NOTREACHED();
    return nullptr;
  }
  // To avoid racy destruction at shutdown, the following will be leaked.
  const BucketRanges* ranges = CreateRangesFromData(
      ranges_data,
      histogram_data.ranges_checksum,
      histogram_data.bucket_count + 1);
  if (!ranges) {
    RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_RANGES_ARRAY);
    NOTREACHED();
    return nullptr;
  }
  ranges = StatisticsRecorder::RegisterOrDeleteDuplicateRanges(ranges);

  HistogramBase::AtomicCount* counts_data =
      allocator->GetAsObject<HistogramBase::AtomicCount>(
          histogram_data.counts_ref, kTypeIdCountsArray);
  size_t counts_bytes =
      CalculateRequiredCountsBytes(histogram_data.bucket_count);
  if (!counts_data || !counts_bytes ||
      allocator->GetAllocSize(histogram_data.counts_ref) < counts_bytes) {
    RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_COUNTS_ARRAY);
    NOTREACHED();
    return nullptr;
  }

  // After the main "counts" array is a second array used for storing what
  // was previously logged. This is used to calculate the "delta" during
  // snapshot operations.
  HistogramBase::AtomicCount* logged_data =
      counts_data + histogram_data.bucket_count;

  std::string name(histogram_data_ptr->name);
  HistogramBase* histogram = nullptr;
  switch (histogram_data.histogram_type) {
    case HISTOGRAM:
      histogram = Histogram::PersistentGet(
          name,
          histogram_data.minimum,
          histogram_data.maximum,
          ranges,
          counts_data,
          logged_data,
          histogram_data.bucket_count,
          &histogram_data_ptr->samples_metadata,
          &histogram_data_ptr->logged_metadata);
      DCHECK(histogram);
      break;
    case LINEAR_HISTOGRAM:
      histogram = LinearHistogram::PersistentGet(
          name,
          histogram_data.minimum,
          histogram_data.maximum,
          ranges,
          counts_data,
          logged_data,
          histogram_data.bucket_count,
          &histogram_data_ptr->samples_metadata,
          &histogram_data_ptr->logged_metadata);
      DCHECK(histogram);
      break;
    case BOOLEAN_HISTOGRAM:
      histogram = BooleanHistogram::PersistentGet(
          name,
          ranges,
          counts_data,
          logged_data,
          &histogram_data_ptr->samples_metadata,
          &histogram_data_ptr->logged_metadata);
      DCHECK(histogram);
      break;
    case CUSTOM_HISTOGRAM:
      histogram = CustomHistogram::PersistentGet(
          name,
          ranges,
          counts_data,
          logged_data,
          histogram_data.bucket_count,
          &histogram_data_ptr->samples_metadata,
          &histogram_data_ptr->logged_metadata);
      DCHECK(histogram);
      break;
    default:
      NOTREACHED();
  }

  if (histogram) {
    DCHECK_EQ(histogram_data.histogram_type, histogram->GetHistogramType());
    histogram->SetFlags(histogram_data.flags);
    RecordCreateHistogramResult(CREATE_HISTOGRAM_SUCCESS);
  } else {
    RecordCreateHistogramResult(CREATE_HISTOGRAM_UNKNOWN_TYPE);
  }

  return histogram;
}

HistogramBase* GetPersistentHistogram(
    PersistentMemoryAllocator* allocator,
    int32_t ref) {
  // Unfortunately, the above "pickle" methods cannot be used as part of the
  // persistence because the deserialization methods always create local
  // count data (these must reference the persistent counts) and always add
  // it to the local list of known histograms (these may be simple references
  // to histograms in other processes).
  PersistentHistogramData* histogram_data =
      allocator->GetAsObject<PersistentHistogramData>(ref, kTypeIdHistogram);
  size_t length = allocator->GetAllocSize(ref);
  if (!histogram_data ||
      reinterpret_cast<char*>(histogram_data)[length - 1] != '\0') {
    RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_METADATA);
    NOTREACHED();
    return nullptr;
  }
  return CreatePersistentHistogram(allocator, histogram_data);
}

HistogramBase* GetNextPersistentHistogram(
    PersistentMemoryAllocator* allocator,
    PersistentMemoryAllocator::Iterator* iter) {
  PersistentMemoryAllocator::Reference ref;
  uint32_t type_id;
  while ((ref = allocator->GetNextIterable(iter, &type_id)) != 0) {
    if (type_id == kTypeIdHistogram)
      return GetPersistentHistogram(allocator, ref);
  }
  return nullptr;
}
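
// Sketch of how a caller could drain every histogram from an allocator
// (hypothetical example; ImportPersistentHistograms() below does essentially
// this with the global allocator):
//   PersistentMemoryAllocator::Iterator iter;
//   allocator->CreateIterator(&iter);
//   while (HistogramBase* h = GetNextPersistentHistogram(allocator, &iter))
//     StatisticsRecorder::RegisterOrDeleteDuplicate(h);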

void FinalizePersistentHistogram(PersistentMemoryAllocator::Reference ref,
                                 bool registered) {
  // If the created persistent histogram was registered then it needs to
  // be marked as "iterable" in order to be found by other processes.
  if (registered)
    GetPersistentHistogramMemoryAllocator()->MakeIterable(ref);
  // If it wasn't registered then a race condition must have caused
  // two to be created. The allocator does not support releasing the
  // acquired memory so just change the type to be empty.
  else
    GetPersistentHistogramMemoryAllocator()->SetType(ref, 0);
}

HistogramBase* AllocatePersistentHistogram(
    PersistentMemoryAllocator* allocator,
    HistogramType histogram_type,
    const std::string& name,
    int minimum,
    int maximum,
    const BucketRanges* bucket_ranges,
    int32_t flags,
    PersistentMemoryAllocator::Reference* ref_ptr) {
  if (!allocator)
    return nullptr;

  // If the allocator is corrupt, don't waste time trying anything else.
  // This also allows differentiating on the dashboard between allocations
  // failed due to a corrupt allocator and the number of process instances
  // with one, the latter being indicated by "newly corrupt", below.
  if (allocator->IsCorrupt()) {
    RecordCreateHistogramResult(CREATE_HISTOGRAM_ALLOCATOR_CORRUPT);
    return nullptr;
  }

  // If CalculateRequiredCountsBytes() returns zero then the bucket_count
  // was not valid.
  size_t bucket_count = bucket_ranges->bucket_count();
  size_t counts_bytes = CalculateRequiredCountsBytes(bucket_count);
  if (!counts_bytes) {
    NOTREACHED();
    return nullptr;
  }

  size_t ranges_bytes = (bucket_count + 1) * sizeof(HistogramBase::Sample);
  PersistentMemoryAllocator::Reference ranges_ref =
      allocator->Allocate(ranges_bytes, kTypeIdRangesArray);
  PersistentMemoryAllocator::Reference counts_ref =
      allocator->Allocate(counts_bytes, kTypeIdCountsArray);
  PersistentMemoryAllocator::Reference histogram_ref =
      allocator->Allocate(offsetof(PersistentHistogramData, name) +
                          name.length() + 1, kTypeIdHistogram);
  HistogramBase::Sample* ranges_data =
      allocator->GetAsObject<HistogramBase::Sample>(ranges_ref,
                                                    kTypeIdRangesArray);
  PersistentHistogramData* histogram_data =
      allocator->GetAsObject<PersistentHistogramData>(histogram_ref,
                                                      kTypeIdHistogram);

  // Only continue here if all allocations were successful. If they weren't,
  // there is no way to free the space, but that's not really a problem since
  // the allocations only fail because the space is full and so any future
  // attempts will also fail.
  if (counts_ref && ranges_data && histogram_data) {
    strcpy(histogram_data->name, name.c_str());
    for (size_t i = 0; i < bucket_ranges->size(); ++i)
      ranges_data[i] = bucket_ranges->range(i);

    histogram_data->histogram_type = histogram_type;
    histogram_data->flags = flags;
    histogram_data->minimum = minimum;
    histogram_data->maximum = maximum;
    histogram_data->bucket_count = static_cast<uint32_t>(bucket_count);
    histogram_data->ranges_ref = ranges_ref;
    histogram_data->ranges_checksum = bucket_ranges->checksum();
    histogram_data->counts_ref = counts_ref;

    // Create the histogram using resources in persistent memory. This ends up
    // resolving the "ref" values stored in histogram_data instead of just
    // using what is already known above but avoids duplicating the switch
    // statement here and serves as a double-check that everything is
    // correct before committing the new histogram to persistent space.
    HistogramBase* histogram =
        CreatePersistentHistogram(allocator, histogram_data);
    DCHECK(histogram);
    if (ref_ptr != nullptr)
      *ref_ptr = histogram_ref;
    return histogram;
  }

  CreateHistogramResultType result;
  if (allocator->IsCorrupt()) {
    RecordCreateHistogramResult(CREATE_HISTOGRAM_ALLOCATOR_NEWLY_CORRUPT);
    result = CREATE_HISTOGRAM_ALLOCATOR_CORRUPT;
  } else if (allocator->IsFull()) {
    result = CREATE_HISTOGRAM_ALLOCATOR_FULL;
  } else {
    result = CREATE_HISTOGRAM_ALLOCATOR_ERROR;
  }
  RecordCreateHistogramResult(result);
  NOTREACHED() << "error=" << result;

  return nullptr;
}
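
// Sketch of the expected allocate/register/finalize sequence (a hypothetical
// caller, shown only to illustrate how the functions above fit together):
//   PersistentMemoryAllocator::Reference ref = 0;
//   HistogramBase* histogram = AllocatePersistentHistogram(
//       GetPersistentHistogramMemoryAllocator(), HISTOGRAM, name, minimum,
//       maximum, registered_ranges, flags, &ref);
//   if (histogram) {
//     bool registered =
//         StatisticsRecorder::RegisterOrDeleteDuplicate(histogram) ==
//         histogram;
//     FinalizePersistentHistogram(ref, registered);
//   }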

void ImportPersistentHistograms() {
  // The lock protects against concurrent access to the iterator and is created
  // in a thread-safe manner when needed.
  static base::LazyInstance<base::Lock>::Leaky lock = LAZY_INSTANCE_INITIALIZER;

  if (g_allocator) {
    base::AutoLock auto_lock(lock.Get());

    // Each call resumes from where it last left off, so a persistent
    // iterator is needed. This class has a constructor so even the
    // definition has to be protected by the lock in order to be thread-safe.
    static PersistentMemoryAllocator::Iterator iter;
    if (iter.is_clear())
      g_allocator->CreateIterator(&iter);

    while (true) {
      HistogramBase* histogram = GetNextPersistentHistogram(g_allocator, &iter);
      if (!histogram)
        break;
      StatisticsRecorder::RegisterOrDeleteDuplicate(histogram);
    }
  }
}

}  // namespace base