Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(120)

Side by Side Diff: base/metrics/persistent_memory_allocator.cc

Issue 2635303002: Create 'errors' histogram for failures. (Closed)
Patch Set: addressed review comments by asvitkine Created 3 years, 11 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // Copyright (c) 2015 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2015 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "base/metrics/persistent_memory_allocator.h" 5 #include "base/metrics/persistent_memory_allocator.h"
6 6
7 #include <assert.h> 7 #include <assert.h>
8 #include <algorithm> 8 #include <algorithm>
9 9
10 #if defined(OS_WIN) 10 #if defined(OS_WIN)
(...skipping 31 matching lines...) Expand 10 before | Expand all | Expand 10 after
42 42
43 // TODO(bcwhite): When acceptable, consider moving flags to std::atomic<char> 43 // TODO(bcwhite): When acceptable, consider moving flags to std::atomic<char>
44 // types rather than combined bitfield. 44 // types rather than combined bitfield.
45 45
// Flags stored in the flags_ field of the SharedMetadata structure below.
enum : int {
  kFlagCorrupt = 0x1,
  kFlagFull = 0x2,
};
51 51
// Errors that are logged in the "errors" histogram.
//
// These values are recorded to a UMA histogram, so they are persisted in
// reports: existing entries must never be renumbered or reused; add new
// values at the end only.
enum AllocatorError : int {
  kMemoryIsCorrupt = 1,
};
56
// Returns true iff |flag| is currently set in |flags|. The load is relaxed
// because flag checks carry no ordering requirements with other memory.
bool CheckFlag(const volatile std::atomic<uint32_t>* flags, int flag) {
  return (flags->load(std::memory_order_relaxed) & flag) != 0;
}
56 61
57 void SetFlag(volatile std::atomic<uint32_t>* flags, int flag) { 62 void SetFlag(volatile std::atomic<uint32_t>* flags, int flag) {
58 uint32_t loaded_flags = flags->load(std::memory_order_relaxed); 63 uint32_t loaded_flags = flags->load(std::memory_order_relaxed);
59 for (;;) { 64 for (;;) {
60 uint32_t new_flags = (loaded_flags & ~flag) | flag; 65 uint32_t new_flags = (loaded_flags & ~flag) | flag;
 61     // In the failure case, the actual "flags" value is stored in loaded_flags.  66     // In the failure case, the actual "flags" value is stored in loaded_flags.
(...skipping 233 matching lines...) Expand 10 before | Expand all | Expand 10 after
295 uint64_t id, 300 uint64_t id,
296 base::StringPiece name, 301 base::StringPiece name,
297 bool readonly) 302 bool readonly)
298 : mem_base_(static_cast<char*>(memory.base)), 303 : mem_base_(static_cast<char*>(memory.base)),
299 mem_type_(memory.type), 304 mem_type_(memory.type),
300 mem_size_(static_cast<uint32_t>(size)), 305 mem_size_(static_cast<uint32_t>(size)),
301 mem_page_(static_cast<uint32_t>((page_size ? page_size : size))), 306 mem_page_(static_cast<uint32_t>((page_size ? page_size : size))),
302 readonly_(readonly), 307 readonly_(readonly),
303 corrupt_(0), 308 corrupt_(0),
304 allocs_histogram_(nullptr), 309 allocs_histogram_(nullptr),
305 used_histogram_(nullptr) { 310 used_histogram_(nullptr),
311 errors_histogram_(nullptr) {
306 // These asserts ensure that the structures are 32/64-bit agnostic and meet 312 // These asserts ensure that the structures are 32/64-bit agnostic and meet
307 // all the requirements of use within the allocator. They access private 313 // all the requirements of use within the allocator. They access private
308 // definitions and so cannot be moved to the global scope. 314 // definitions and so cannot be moved to the global scope.
309 static_assert(sizeof(PersistentMemoryAllocator::BlockHeader) == 16, 315 static_assert(sizeof(PersistentMemoryAllocator::BlockHeader) == 16,
310 "struct is not portable across different natural word widths"); 316 "struct is not portable across different natural word widths");
311 static_assert(sizeof(PersistentMemoryAllocator::SharedMetadata) == 56, 317 static_assert(sizeof(PersistentMemoryAllocator::SharedMetadata) == 56,
312 "struct is not portable across different natural word widths"); 318 "struct is not portable across different natural word widths");
313 319
314 static_assert(sizeof(BlockHeader) % kAllocAlignment == 0, 320 static_assert(sizeof(BlockHeader) % kAllocAlignment == 0,
315 "BlockHeader is not a multiple of kAllocAlignment"); 321 "BlockHeader is not a multiple of kAllocAlignment");
(...skipping 120 matching lines...) Expand 10 before | Expand all | Expand 10 after
436 return ""; 442 return "";
437 } 443 }
438 444
439 return name_cstr; 445 return name_cstr;
440 } 446 }
441 447
442 void PersistentMemoryAllocator::CreateTrackingHistograms( 448 void PersistentMemoryAllocator::CreateTrackingHistograms(
443 base::StringPiece name) { 449 base::StringPiece name) {
444 if (name.empty() || readonly_) 450 if (name.empty() || readonly_)
445 return; 451 return;
452 std::string name_string = name.as_string();
446 453
447 std::string name_string = name.as_string(); 454 DCHECK(!allocs_histogram_);
455 allocs_histogram_ = Histogram::FactoryGet(
456 "UMA.PersistentAllocator." + name_string + ".Allocs", 1, 10000, 50,
457 HistogramBase::kUmaTargetedHistogramFlag);
458
448 DCHECK(!used_histogram_); 459 DCHECK(!used_histogram_);
449 used_histogram_ = LinearHistogram::FactoryGet( 460 used_histogram_ = LinearHistogram::FactoryGet(
450 "UMA.PersistentAllocator." + name_string + ".UsedPct", 1, 101, 21, 461 "UMA.PersistentAllocator." + name_string + ".UsedPct", 1, 101, 21,
451 HistogramBase::kUmaTargetedHistogramFlag); 462 HistogramBase::kUmaTargetedHistogramFlag);
452 463
453 DCHECK(!allocs_histogram_); 464 DCHECK(!errors_histogram_);
454 allocs_histogram_ = Histogram::FactoryGet( 465 errors_histogram_ = SparseHistogram::FactoryGet(
455 "UMA.PersistentAllocator." + name_string + ".Allocs", 1, 10000, 50, 466 "UMA.PersistentAllocator." + name_string + ".Errors",
456 HistogramBase::kUmaTargetedHistogramFlag); 467 HistogramBase::kUmaTargetedHistogramFlag);
457 } 468 }
458 469
459 size_t PersistentMemoryAllocator::used() const { 470 size_t PersistentMemoryAllocator::used() const {
460 return std::min(shared_meta()->freeptr.load(std::memory_order_relaxed), 471 return std::min(shared_meta()->freeptr.load(std::memory_order_relaxed),
461 mem_size_); 472 mem_size_);
462 } 473 }
463 474
464 PersistentMemoryAllocator::Reference PersistentMemoryAllocator::GetAsReference( 475 PersistentMemoryAllocator::Reference PersistentMemoryAllocator::GetAsReference(
465 const void* memory, 476 const void* memory,
(...skipping 177 matching lines...) Expand 10 before | Expand all | Expand 10 after
643 block->type_id.store(type_id, std::memory_order_relaxed); 654 block->type_id.store(type_id, std::memory_order_relaxed);
644 return freeptr; 655 return freeptr;
645 } 656 }
646 } 657 }
647 658
648 void PersistentMemoryAllocator::GetMemoryInfo(MemoryInfo* meminfo) const { 659 void PersistentMemoryAllocator::GetMemoryInfo(MemoryInfo* meminfo) const {
649 uint32_t remaining = std::max( 660 uint32_t remaining = std::max(
650 mem_size_ - shared_meta()->freeptr.load(std::memory_order_relaxed), 661 mem_size_ - shared_meta()->freeptr.load(std::memory_order_relaxed),
651 (uint32_t)sizeof(BlockHeader)); 662 (uint32_t)sizeof(BlockHeader));
652 meminfo->total = mem_size_; 663 meminfo->total = mem_size_;
653 meminfo->free = IsCorrupt() ? 0 : remaining - sizeof(BlockHeader); 664 meminfo->free = remaining - sizeof(BlockHeader);
654 } 665 }
655 666
656 void PersistentMemoryAllocator::MakeIterable(Reference ref) { 667 void PersistentMemoryAllocator::MakeIterable(Reference ref) {
657 DCHECK(!readonly_); 668 DCHECK(!readonly_);
658 if (IsCorrupt()) 669 if (IsCorrupt())
659 return; 670 return;
660 volatile BlockHeader* block = GetBlock(ref, 0, 0, false, false); 671 volatile BlockHeader* block = GetBlock(ref, 0, 0, false, false);
661 if (!block) // invalid reference 672 if (!block) // invalid reference
662 return; 673 return;
663 if (block->next.load(std::memory_order_acquire) != 0) // Already iterable. 674 if (block->next.load(std::memory_order_acquire) != 0) // Already iterable.
(...skipping 47 matching lines...) Expand 10 before | Expand all | Expand 10 after
711 } 722 }
712 } 723 }
713 724
714 // The "corrupted" state is held both locally and globally (shared). The 725 // The "corrupted" state is held both locally and globally (shared). The
715 // shared flag can't be trusted since a malicious actor could overwrite it. 726 // shared flag can't be trusted since a malicious actor could overwrite it.
716 // Because corruption can be detected during read-only operations such as 727 // Because corruption can be detected during read-only operations such as
717 // iteration, this method may be called by other "const" methods. In this 728 // iteration, this method may be called by other "const" methods. In this
718 // case, it's safe to discard the constness and modify the local flag and 729 // case, it's safe to discard the constness and modify the local flag and
719 // maybe even the shared flag if the underlying data isn't actually read-only. 730 // maybe even the shared flag if the underlying data isn't actually read-only.
720 void PersistentMemoryAllocator::SetCorrupt() const { 731 void PersistentMemoryAllocator::SetCorrupt() const {
721 LOG(ERROR) << "Corruption detected in shared-memory segment."; 732 if (!corrupt_.load(std::memory_order_relaxed) &&
722 const_cast<std::atomic<bool>*>(&corrupt_)->store(true, 733 !CheckFlag(
723 std::memory_order_relaxed); 734 const_cast<volatile std::atomic<uint32_t>*>(&shared_meta()->flags),
735 kFlagCorrupt)) {
736 LOG(ERROR) << "Corruption detected in shared-memory segment.";
737 RecordError(kMemoryIsCorrupt);
738 }
739
740 corrupt_.store(true, std::memory_order_relaxed);
724 if (!readonly_) { 741 if (!readonly_) {
725 SetFlag(const_cast<volatile std::atomic<uint32_t>*>(&shared_meta()->flags), 742 SetFlag(const_cast<volatile std::atomic<uint32_t>*>(&shared_meta()->flags),
726 kFlagCorrupt); 743 kFlagCorrupt);
727 } 744 }
728 } 745 }
729 746
730 bool PersistentMemoryAllocator::IsCorrupt() const { 747 bool PersistentMemoryAllocator::IsCorrupt() const {
731 if (corrupt_.load(std::memory_order_relaxed) || 748 if (corrupt_.load(std::memory_order_relaxed) ||
732 CheckFlag(&shared_meta()->flags, kFlagCorrupt)) { 749 CheckFlag(&shared_meta()->flags, kFlagCorrupt)) {
733 SetCorrupt(); // Make sure all indicators are set. 750 SetCorrupt(); // Make sure all indicators are set.
(...skipping 41 matching lines...) Expand 10 before | Expand all | Expand 10 after
775 if (type_id != 0 && 792 if (type_id != 0 &&
776 block->type_id.load(std::memory_order_relaxed) != type_id) { 793 block->type_id.load(std::memory_order_relaxed) != type_id) {
777 return nullptr; 794 return nullptr;
778 } 795 }
779 } 796 }
780 797
781 // Return pointer to block data. 798 // Return pointer to block data.
782 return reinterpret_cast<const volatile BlockHeader*>(mem_base_ + ref); 799 return reinterpret_cast<const volatile BlockHeader*>(mem_base_ + ref);
783 } 800 }
784 801
802 void PersistentMemoryAllocator::RecordError(int error) const {
803 if (errors_histogram_)
804 errors_histogram_->Add(error);
805 }
806
785 const volatile void* PersistentMemoryAllocator::GetBlockData( 807 const volatile void* PersistentMemoryAllocator::GetBlockData(
786 Reference ref, 808 Reference ref,
787 uint32_t type_id, 809 uint32_t type_id,
788 uint32_t size) const { 810 uint32_t size) const {
789 DCHECK(size > 0); 811 DCHECK(size > 0);
790 const volatile BlockHeader* block = 812 const volatile BlockHeader* block =
791 GetBlock(ref, type_id, size, false, false); 813 GetBlock(ref, type_id, size, false, false);
792 if (!block) 814 if (!block)
793 return nullptr; 815 return nullptr;
794 return reinterpret_cast<const volatile char*>(block) + sizeof(BlockHeader); 816 return reinterpret_cast<const volatile char*>(block) + sizeof(BlockHeader);
(...skipping 128 matching lines...) Expand 10 before | Expand all | Expand 10 after
923 945
924 // static 946 // static
925 bool FilePersistentMemoryAllocator::IsFileAcceptable( 947 bool FilePersistentMemoryAllocator::IsFileAcceptable(
926 const MemoryMappedFile& file, 948 const MemoryMappedFile& file,
927 bool read_only) { 949 bool read_only) {
928 return IsMemoryAcceptable(file.data(), file.length(), 0, read_only); 950 return IsMemoryAcceptable(file.data(), file.length(), 0, read_only);
929 } 951 }
930 #endif // !defined(OS_NACL) 952 #endif // !defined(OS_NACL)
931 953
932 } // namespace base 954 } // namespace base
OLDNEW
« no previous file with comments | « base/metrics/persistent_memory_allocator.h ('k') | tools/metrics/histograms/extract_histograms.py » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698