OLD | NEW |
1 // Copyright 2016 The Chromium Authors. All rights reserved. | 1 // Copyright 2016 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "base/metrics/persistent_histogram_allocator.h" | 5 #include "base/metrics/persistent_histogram_allocator.h" |
6 | 6 |
7 #include "base/lazy_instance.h" | 7 #include "base/lazy_instance.h" |
8 #include "base/logging.h" | 8 #include "base/logging.h" |
9 #include "base/memory/scoped_ptr.h" | 9 #include "base/memory/scoped_ptr.h" |
10 #include "base/metrics/histogram.h" | 10 #include "base/metrics/histogram.h" |
(...skipping 21 matching lines...) Expand all Loading... |
32 // will be safely ignored. | 32 // will be safely ignored. |
33 enum : uint32_t { | 33 enum : uint32_t { |
34 kTypeIdHistogram = 0xF1645910 + 2, // SHA1(Histogram) v2 | 34 kTypeIdHistogram = 0xF1645910 + 2, // SHA1(Histogram) v2 |
35 kTypeIdRangesArray = 0xBCEA225A + 1, // SHA1(RangesArray) v1 | 35 kTypeIdRangesArray = 0xBCEA225A + 1, // SHA1(RangesArray) v1 |
36 kTypeIdCountsArray = 0x53215530 + 1, // SHA1(CountsArray) v1 | 36 kTypeIdCountsArray = 0x53215530 + 1, // SHA1(CountsArray) v1 |
37 }; | 37 }; |
38 | 38 |
39 // The current globally-active persistent allocator for all new histograms. | 39 // The current globally-active persistent allocator for all new histograms. |
40 // The object held here will obviously not be destructed at process exit | 40 // The object held here will obviously not be destructed at process exit |
41 // but that's best since PersistentMemoryAllocator objects (that underlie | 41 // but that's best since PersistentMemoryAllocator objects (that underlie |
42 // PersistentHistogramAllocator objects) are explicitly forbidden from doing | 42 // GlobalHistogramAllocator objects) are explicitly forbidden from doing |
43 // anything essential at exit anyway due to the fact that they depend on data | 43 // anything essential at exit anyway due to the fact that they depend on data |
44 // managed elsewhere and which could be destructed first. | 44 // managed elsewhere and which could be destructed first. |
45 PersistentHistogramAllocator* g_allocator; | 45 GlobalHistogramAllocator* g_allocator; |
46 | 46 |
47 // Take an array of range boundaries and create a proper BucketRanges object | 47 // Take an array of range boundaries and create a proper BucketRanges object |
48 // which is returned to the caller. A return of nullptr indicates that the | 48 // which is returned to the caller. A return of nullptr indicates that the |
49 // passed boundaries are invalid. | 49 // passed boundaries are invalid. |
50 scoped_ptr<BucketRanges> CreateRangesFromData( | 50 scoped_ptr<BucketRanges> CreateRangesFromData( |
51 HistogramBase::Sample* ranges_data, | 51 HistogramBase::Sample* ranges_data, |
52 uint32_t ranges_checksum, | 52 uint32_t ranges_checksum, |
53 size_t count) { | 53 size_t count) { |
54 // To avoid racy destruction at shutdown, the following may be leaked. | 54 // To avoid racy destruction at shutdown, the following may be leaked. |
55 scoped_ptr<BucketRanges> ranges(new BucketRanges(count)); | 55 scoped_ptr<BucketRanges> ranges(new BucketRanges(count)); |
(...skipping 110 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
166 | 166 |
167 // static | 167 // static |
168 void PersistentHistogramAllocator::RecordCreateHistogramResult( | 168 void PersistentHistogramAllocator::RecordCreateHistogramResult( |
169 CreateHistogramResultType result) { | 169 CreateHistogramResultType result) { |
170 HistogramBase* result_histogram = GetCreateHistogramResultHistogram(); | 170 HistogramBase* result_histogram = GetCreateHistogramResultHistogram(); |
171 if (result_histogram) | 171 if (result_histogram) |
172 result_histogram->Add(result); | 172 result_histogram->Add(result); |
173 } | 173 } |
174 | 174 |
175 // static | 175 // static |
176 void PersistentHistogramAllocator::SetGlobalAllocator( | |
177 scoped_ptr<PersistentHistogramAllocator> allocator) { | |
178 // Releasing or changing an allocator is extremely dangerous because it | |
179 // likely has histograms stored within it. If the backing memory is also | |
180 // released, future accesses to those histograms will seg-fault. | |
181 CHECK(!g_allocator); | |
182 g_allocator = allocator.release(); | |
183 | |
184 size_t existing = StatisticsRecorder::GetHistogramCount(); | |
185 DLOG_IF(WARNING, existing) | |
186 << existing | |
187 << " histograms were created before persistence was enabled."; | |
188 } | |
189 | |
190 // static | |
191 PersistentHistogramAllocator* | |
192 PersistentHistogramAllocator::GetGlobalAllocator() { | |
193 return g_allocator; | |
194 } | |
195 | |
196 // static | |
197 scoped_ptr<PersistentHistogramAllocator> | |
198 PersistentHistogramAllocator::ReleaseGlobalAllocatorForTesting() { | |
199 PersistentHistogramAllocator* histogram_allocator = g_allocator; | |
200 if (!histogram_allocator) | |
201 return nullptr; | |
202 PersistentMemoryAllocator* memory_allocator = | |
203 histogram_allocator->memory_allocator(); | |
204 | |
205 // Before releasing the memory, it's necessary to have the Statistics- | |
206 // Recorder forget about the histograms contained therein; otherwise, | |
207 // some operations will try to access them and the released memory. | |
208 PersistentMemoryAllocator::Iterator iter; | |
209 PersistentMemoryAllocator::Reference ref; | |
210 uint32_t type_id; | |
211 memory_allocator->CreateIterator(&iter); | |
212 while ((ref = memory_allocator->GetNextIterable(&iter, &type_id)) != 0) { | |
213 if (type_id == kTypeIdHistogram) { | |
214 PersistentHistogramData* histogram_data = | |
215 memory_allocator->GetAsObject<PersistentHistogramData>( | |
216 ref, kTypeIdHistogram); | |
217 DCHECK(histogram_data); | |
218 StatisticsRecorder::ForgetHistogramForTesting(histogram_data->name); | |
219 | |
220 // If a test breaks here then a memory region containing a histogram | |
221 // actively used by this code is being released back to the test. | |
222 // If that memory segment were to be deleted, future calls to create | |
223 // persistent histograms would crash. To avoid this, have the test call | |
224 // the method GetCreateHistogramResultHistogram() *before* setting | |
225 // the (temporary) memory allocator via SetGlobalAllocator() so that | |
226 // histogram is instead allocated from the process heap. | |
227 DCHECK_NE(kResultHistogram, histogram_data->name); | |
228 } | |
229 } | |
230 | |
231 g_allocator = nullptr; | |
232 return make_scoped_ptr(histogram_allocator); | |
233 }; | |
234 | |
235 // static | |
236 void PersistentHistogramAllocator::CreateGlobalAllocatorOnPersistentMemory( | |
237 void* base, | |
238 size_t size, | |
239 size_t page_size, | |
240 uint64_t id, | |
241 StringPiece name) { | |
242 SetGlobalAllocator(make_scoped_ptr(new PersistentHistogramAllocator( | |
243 make_scoped_ptr(new PersistentMemoryAllocator( | |
244 base, size, page_size, id, name, false))))); | |
245 } | |
246 | |
247 // static | |
248 void PersistentHistogramAllocator::CreateGlobalAllocatorOnLocalMemory( | |
249 size_t size, | |
250 uint64_t id, | |
251 StringPiece name) { | |
252 SetGlobalAllocator(make_scoped_ptr(new PersistentHistogramAllocator( | |
253 make_scoped_ptr(new LocalPersistentMemoryAllocator(size, id, name))))); | |
254 } | |
255 | |
256 // static | |
257 void PersistentHistogramAllocator::CreateGlobalAllocatorOnSharedMemory( | |
258 size_t size, | |
259 const SharedMemoryHandle& handle) { | |
260 scoped_ptr<SharedMemory> shm(new SharedMemory(handle, /*readonly=*/false)); | |
261 if (!shm->Map(size)) { | |
262 NOTREACHED(); | |
263 return; | |
264 } | |
265 | |
266 SetGlobalAllocator(make_scoped_ptr(new PersistentHistogramAllocator( | |
267 make_scoped_ptr(new SharedPersistentMemoryAllocator( | |
268 std::move(shm), 0, StringPiece(), /*readonly=*/false))))); | |
269 } | |
270 | |
271 // static | |
272 scoped_ptr<HistogramBase> PersistentHistogramAllocator::CreateHistogram( | 176 scoped_ptr<HistogramBase> PersistentHistogramAllocator::CreateHistogram( |
273 PersistentHistogramData* histogram_data_ptr) { | 177 PersistentHistogramData* histogram_data_ptr) { |
274 if (!histogram_data_ptr) { | 178 if (!histogram_data_ptr) { |
275 RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_METADATA_POINTER); | 179 RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_METADATA_POINTER); |
276 NOTREACHED(); | 180 NOTREACHED(); |
277 return nullptr; | 181 return nullptr; |
278 } | 182 } |
279 | 183 |
280 // Sparse histograms are quite different so handle them as a special case. | 184 // Sparse histograms are quite different so handle them as a special case. |
281 if (histogram_data_ptr->histogram_type == SPARSE_HISTOGRAM) { | 185 if (histogram_data_ptr->histogram_type == SPARSE_HISTOGRAM) { |
(...skipping 239 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
521 // using what is already known above but avoids duplicating the switch | 425 // using what is already known above but avoids duplicating the switch |
522 // statement here and serves as a double-check that everything is | 426 // statement here and serves as a double-check that everything is |
523 // correct before committing the new histogram to persistent space. | 427 // correct before committing the new histogram to persistent space. |
524 scoped_ptr<HistogramBase> histogram = CreateHistogram(histogram_data); | 428 scoped_ptr<HistogramBase> histogram = CreateHistogram(histogram_data); |
525 DCHECK(histogram); | 429 DCHECK(histogram); |
526 if (ref_ptr != nullptr) | 430 if (ref_ptr != nullptr) |
527 *ref_ptr = histogram_ref; | 431 *ref_ptr = histogram_ref; |
528 | 432 |
529 // By storing the reference within the allocator to this histogram, the | 433 // By storing the reference within the allocator to this histogram, the |
530 // next import (which will happen before the next histogram creation) | 434 // next import (which will happen before the next histogram creation) |
531 // will know to skip it. See also the comment in ImportGlobalHistograms(). | 435 // will know to skip it. |
| 436 // See also the comment in ImportHistogramsToStatisticsRecorder(). |
532 subtle::NoBarrier_Store(&last_created_, histogram_ref); | 437 subtle::NoBarrier_Store(&last_created_, histogram_ref); |
533 return histogram; | 438 return histogram; |
534 } | 439 } |
535 | 440 |
536 CreateHistogramResultType result; | 441 CreateHistogramResultType result; |
537 if (memory_allocator_->IsCorrupt()) { | 442 if (memory_allocator_->IsCorrupt()) { |
538 RecordCreateHistogramResult(CREATE_HISTOGRAM_ALLOCATOR_NEWLY_CORRUPT); | 443 RecordCreateHistogramResult(CREATE_HISTOGRAM_ALLOCATOR_NEWLY_CORRUPT); |
539 result = CREATE_HISTOGRAM_ALLOCATOR_CORRUPT; | 444 result = CREATE_HISTOGRAM_ALLOCATOR_CORRUPT; |
540 } else if (memory_allocator_->IsFull()) { | 445 } else if (memory_allocator_->IsFull()) { |
541 result = CREATE_HISTOGRAM_ALLOCATOR_FULL; | 446 result = CREATE_HISTOGRAM_ALLOCATOR_FULL; |
542 } else { | 447 } else { |
543 result = CREATE_HISTOGRAM_ALLOCATOR_ERROR; | 448 result = CREATE_HISTOGRAM_ALLOCATOR_ERROR; |
544 } | 449 } |
545 RecordCreateHistogramResult(result); | 450 RecordCreateHistogramResult(result); |
546 NOTREACHED() << "error=" << result; | 451 NOTREACHED() << "error=" << result; |
547 | 452 |
548 return nullptr; | 453 return nullptr; |
549 } | 454 } |
550 | 455 |
| 456 GlobalHistogramAllocator::~GlobalHistogramAllocator() {} |
| 457 |
551 // static | 458 // static |
552 void PersistentHistogramAllocator::ImportGlobalHistograms() { | 459 void GlobalHistogramAllocator::CreateWithPersistentMemory( |
553 // The lock protects against concurrent access to the iterator and is created | 460 void* base, |
554 // in a thread-safe manner when needed. | 461 size_t size, |
555 static base::LazyInstance<base::Lock>::Leaky lock = LAZY_INSTANCE_INITIALIZER; | 462 size_t page_size, |
| 463 uint64_t id, |
| 464 StringPiece name) { |
| 465 Set(make_scoped_ptr(new GlobalHistogramAllocator( |
| 466 make_scoped_ptr(new PersistentMemoryAllocator( |
| 467 base, size, page_size, id, name, false))))); |
| 468 } |
556 | 469 |
557 if (g_allocator) { | 470 // static |
558 // TODO(bcwhite): Investigate a lock-free, thread-safe iterator. | 471 void GlobalHistogramAllocator::CreateWithLocalMemory( |
559 base::AutoLock auto_lock(lock.Get()); | 472 size_t size, |
| 473 uint64_t id, |
| 474 StringPiece name) { |
| 475 Set(make_scoped_ptr(new GlobalHistogramAllocator( |
| 476 make_scoped_ptr(new LocalPersistentMemoryAllocator(size, id, name))))); |
| 477 } |
560 | 478 |
561 // Each call resumes from where it last left off so a persistent iterator | 479 // static |
562 // is needed. This class has a constructor so even the definition has to | 480 void GlobalHistogramAllocator::CreateWithSharedMemory( |
563 // be protected by the lock in order to be thread-safe. | 481 scoped_ptr<SharedMemory> memory, |
564 static Iterator iter; | 482 size_t size, |
565 if (iter.is_clear()) | 483 uint64_t id, |
566 g_allocator->CreateIterator(&iter); | 484 StringPiece name) { |
| 485 if (!memory->memory() && !memory->Map(size)) |
| 486 NOTREACHED(); |
567 | 487 |
568 // Skip the import if it's the histogram that was last created. Should a | 488 if (memory->memory()) { |
569 // race condition cause the "last created" to be overwritten before it | 489 DCHECK_LE(memory->mapped_size(), size); |
570 // is recognized here then the histogram will be created and be ignored | 490 Set(make_scoped_ptr(new GlobalHistogramAllocator( |
571 // when it is detected as a duplicate by the statistics-recorder. This | 491 make_scoped_ptr(new SharedPersistentMemoryAllocator( |
572 // simple check reduces the time of creating persistent histograms by | 492 std::move(memory), 0, StringPiece(), /*readonly=*/false))))); |
573 // about 40%. | 493 } |
574 Reference last_created = | 494 } |
575 subtle::NoBarrier_Load(&g_allocator->last_created_); | |
576 | 495 |
577 while (true) { | 496 // static |
578 scoped_ptr<HistogramBase> histogram = | 497 void GlobalHistogramAllocator::CreateWithSharedMemoryHandle( |
579 g_allocator->GetNextHistogramWithIgnore(&iter, last_created); | 498 const SharedMemoryHandle& handle, |
580 if (!histogram) | 499 size_t size) { |
581 break; | 500 scoped_ptr<SharedMemory> shm(new SharedMemory(handle, /*readonly=*/false)); |
582 StatisticsRecorder::RegisterOrDeleteDuplicate(histogram.release()); | 501 if (!shm->Map(size)) { |
| 502 NOTREACHED(); |
| 503 return; |
| 504 } |
| 505 |
| 506 Set(make_scoped_ptr(new GlobalHistogramAllocator( |
| 507 make_scoped_ptr(new SharedPersistentMemoryAllocator( |
| 508 std::move(shm), 0, StringPiece(), /*readonly=*/false))))); |
| 509 } |
| 510 |
| 511 // static |
| 512 void GlobalHistogramAllocator::Set( |
| 513 scoped_ptr<GlobalHistogramAllocator> allocator) { |
| 514 // Releasing or changing an allocator is extremely dangerous because it |
| 515 // likely has histograms stored within it. If the backing memory is also |
| 516 // released, future accesses to those histograms will seg-fault. |
| 517 CHECK(!g_allocator); |
| 518 g_allocator = allocator.release(); |
| 519 size_t existing = StatisticsRecorder::GetHistogramCount(); |
| 520 |
| 521 DLOG_IF(WARNING, existing) |
| 522 << existing << " histograms were created before persistence was enabled."; |
| 523 } |
| 524 |
| 525 // static |
| 526 GlobalHistogramAllocator* GlobalHistogramAllocator::Get() { |
| 527 return g_allocator; |
| 528 } |
| 529 |
| 530 // static |
| 531 scoped_ptr<GlobalHistogramAllocator> |
| 532 GlobalHistogramAllocator::ReleaseForTesting() { |
| 533 GlobalHistogramAllocator* histogram_allocator = g_allocator; |
| 534 if (!histogram_allocator) |
| 535 return nullptr; |
| 536 PersistentMemoryAllocator* memory_allocator = |
| 537 histogram_allocator->memory_allocator(); |
| 538 |
| 539 // Before releasing the memory, it's necessary to have the Statistics- |
| 540 // Recorder forget about the histograms contained therein; otherwise, |
| 541 // some operations will try to access them and the released memory. |
| 542 PersistentMemoryAllocator::Iterator iter; |
| 543 PersistentMemoryAllocator::Reference ref; |
| 544 uint32_t type_id; |
| 545 memory_allocator->CreateIterator(&iter); |
| 546 while ((ref = memory_allocator->GetNextIterable(&iter, &type_id)) != 0) { |
| 547 if (type_id == kTypeIdHistogram) { |
| 548 PersistentHistogramData* histogram_data = |
| 549 memory_allocator->GetAsObject<PersistentHistogramData>( |
| 550 ref, kTypeIdHistogram); |
| 551 DCHECK(histogram_data); |
| 552 StatisticsRecorder::ForgetHistogramForTesting(histogram_data->name); |
| 553 |
| 554 // If a test breaks here then a memory region containing a histogram |
| 555 // actively used by this code is being released back to the test. |
| 556 // If that memory segment were to be deleted, future calls to create |
| 557 // persistent histograms would crash. To avoid this, have the test call |
| 558 // the method GetCreateHistogramResultHistogram() *before* setting |
| 559 // the (temporary) memory allocator via Set() so that |
| 560 // histogram is instead allocated from the process heap. |
| 561 DCHECK_NE(kResultHistogram, histogram_data->name); |
583 } | 562 } |
584 } | 563 } |
| 564 |
| 565 g_allocator = nullptr; |
| 566 return make_scoped_ptr(histogram_allocator); |
| 567 }; |
| 568 |
| 569 GlobalHistogramAllocator::GlobalHistogramAllocator( |
| 570 scoped_ptr<PersistentMemoryAllocator> memory) |
| 571 : PersistentHistogramAllocator(std::move(memory)) { |
| 572 CreateIterator(&import_iterator_); |
| 573 } |
| 574 |
| 575 void GlobalHistogramAllocator::ImportHistogramsToStatisticsRecorder() { |
| 576 // Skip the import if it's the histogram that was last created. Should a |
| 577 // race condition cause the "last created" to be overwritten before it |
| 578 // is recognized here then the histogram will be created and be ignored |
| 579 // when it is detected as a duplicate by the statistics-recorder. This |
| 580 // simple check reduces the time of creating persistent histograms by |
| 581 // about 40%. |
| 582 Reference last_created = subtle::NoBarrier_Load(&last_created_); |
| 583 |
| 584 // There is no lock on this because it's expected to be called only by |
| 585 // the StatisticsRecorder which has its own lock. |
| 586 while (true) { |
| 587 scoped_ptr<HistogramBase> histogram = |
| 588 GetNextHistogramWithIgnore(&import_iterator_, last_created); |
| 589 if (!histogram) |
| 590 break; |
| 591 StatisticsRecorder::RegisterOrDeleteDuplicate(histogram.release()); |
| 592 } |
585 } | 593 } |
586 | 594 |
587 } // namespace base | 595 } // namespace base |
OLD | NEW |