Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(141)

Side by Side Diff: base/metrics/persistent_memory_allocator.cc

Issue 2662083002: Create 'errors' histogram for failures. (Closed)
Patch Set: Created 3 years, 10 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // Copyright (c) 2015 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2015 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "base/metrics/persistent_memory_allocator.h" 5 #include "base/metrics/persistent_memory_allocator.h"
6 6
7 #include <assert.h> 7 #include <assert.h>
8 #include <algorithm> 8 #include <algorithm>
9 9
10 #if defined(OS_WIN) 10 #if defined(OS_WIN)
(...skipping 31 matching lines...) Expand 10 before | Expand all | Expand 10 after
42 42
43 // TODO(bcwhite): When acceptable, consider moving flags to std::atomic<char> 43 // TODO(bcwhite): When acceptable, consider moving flags to std::atomic<char>
44 // types rather than combined bitfield. 44 // types rather than combined bitfield.
45 45
46 // Flags stored in the flags_ field of the SharedMetaData structure below. 46 // Flags stored in the flags_ field of the SharedMetaData structure below.
// Flags stored in the flags_ field of the SharedMetaData structure below.
enum : int {
  // Set (see SetCorrupt below) once corruption has been detected in the
  // shared-memory segment, locally or by another process.
  kFlagCorrupt = 1 << 0,
  // NOTE(review): presumably set when the arena can satisfy no further
  // allocations — the setting site is outside this view; confirm.
  kFlagFull = 1 << 1
};

// Errors that are logged in "errors" histogram.
enum AllocatorError : int {
  // Recorded (via RecordError) the first time corruption is detected.
  kMemoryIsCorrupt = 1,
};
56
// Returns true if |flag| is set in the given flag word. Uses a relaxed
// atomic load: callers only need an eventually-consistent view of the bit.
bool CheckFlag(const volatile std::atomic<uint32_t>* flags, int flag) {
  const uint32_t current = flags->load(std::memory_order_relaxed);
  return (current & flag) != 0;
}
56 61
57 void SetFlag(volatile std::atomic<uint32_t>* flags, int flag) { 62 void SetFlag(volatile std::atomic<uint32_t>* flags, int flag) {
58 uint32_t loaded_flags = flags->load(std::memory_order_relaxed); 63 uint32_t loaded_flags = flags->load(std::memory_order_relaxed);
59 for (;;) { 64 for (;;) {
60 uint32_t new_flags = (loaded_flags & ~flag) | flag; 65 uint32_t new_flags = (loaded_flags & ~flag) | flag;
61 // In the failure case, actual "flags" value stored in loaded_flags. 66 // In the failure case, actual "flags" value stored in loaded_flags.
(...skipping 231 matching lines...) Expand 10 before | Expand all | Expand 10 after
293 uint64_t id, 298 uint64_t id,
294 base::StringPiece name, 299 base::StringPiece name,
295 bool readonly) 300 bool readonly)
296 : mem_base_(static_cast<char*>(memory.base)), 301 : mem_base_(static_cast<char*>(memory.base)),
297 mem_type_(memory.type), 302 mem_type_(memory.type),
298 mem_size_(static_cast<uint32_t>(size)), 303 mem_size_(static_cast<uint32_t>(size)),
299 mem_page_(static_cast<uint32_t>((page_size ? page_size : size))), 304 mem_page_(static_cast<uint32_t>((page_size ? page_size : size))),
300 readonly_(readonly), 305 readonly_(readonly),
301 corrupt_(0), 306 corrupt_(0),
302 allocs_histogram_(nullptr), 307 allocs_histogram_(nullptr),
303 used_histogram_(nullptr) { 308 used_histogram_(nullptr),
309 errors_histogram_(nullptr) {
304 // These asserts ensure that the structures are 32/64-bit agnostic and meet 310 // These asserts ensure that the structures are 32/64-bit agnostic and meet
305 // all the requirements of use within the allocator. They access private 311 // all the requirements of use within the allocator. They access private
306 // definitions and so cannot be moved to the global scope. 312 // definitions and so cannot be moved to the global scope.
307 static_assert(sizeof(PersistentMemoryAllocator::BlockHeader) == 16, 313 static_assert(sizeof(PersistentMemoryAllocator::BlockHeader) == 16,
308 "struct is not portable across different natural word widths"); 314 "struct is not portable across different natural word widths");
309 static_assert(sizeof(PersistentMemoryAllocator::SharedMetadata) == 56, 315 static_assert(sizeof(PersistentMemoryAllocator::SharedMetadata) == 56,
310 "struct is not portable across different natural word widths"); 316 "struct is not portable across different natural word widths");
311 317
312 static_assert(sizeof(BlockHeader) % kAllocAlignment == 0, 318 static_assert(sizeof(BlockHeader) % kAllocAlignment == 0,
313 "BlockHeader is not a multiple of kAllocAlignment"); 319 "BlockHeader is not a multiple of kAllocAlignment");
(...skipping 120 matching lines...) Expand 10 before | Expand all | Expand 10 after
434 return ""; 440 return "";
435 } 441 }
436 442
437 return name_cstr; 443 return name_cstr;
438 } 444 }
439 445
440 void PersistentMemoryAllocator::CreateTrackingHistograms( 446 void PersistentMemoryAllocator::CreateTrackingHistograms(
441 base::StringPiece name) { 447 base::StringPiece name) {
442 if (name.empty() || readonly_) 448 if (name.empty() || readonly_)
443 return; 449 return;
450 std::string name_string = name.as_string();
444 451
445 std::string name_string = name.as_string(); 452 DCHECK(!allocs_histogram_);
453 allocs_histogram_ = Histogram::FactoryGet(
454 "UMA.PersistentAllocator." + name_string + ".Allocs", 1, 10000, 50,
455 HistogramBase::kUmaTargetedHistogramFlag);
456
446 DCHECK(!used_histogram_); 457 DCHECK(!used_histogram_);
447 used_histogram_ = LinearHistogram::FactoryGet( 458 used_histogram_ = LinearHistogram::FactoryGet(
448 "UMA.PersistentAllocator." + name_string + ".UsedPct", 1, 101, 21, 459 "UMA.PersistentAllocator." + name_string + ".UsedPct", 1, 101, 21,
449 HistogramBase::kUmaTargetedHistogramFlag); 460 HistogramBase::kUmaTargetedHistogramFlag);
450 461
451 DCHECK(!allocs_histogram_); 462 DCHECK(!errors_histogram_);
452 allocs_histogram_ = Histogram::FactoryGet( 463 errors_histogram_ = SparseHistogram::FactoryGet(
453 "UMA.PersistentAllocator." + name_string + ".Allocs", 1, 10000, 50, 464 "UMA.PersistentAllocator." + name_string + ".Errors",
454 HistogramBase::kUmaTargetedHistogramFlag); 465 HistogramBase::kUmaTargetedHistogramFlag);
455 } 466 }
456 467
457 size_t PersistentMemoryAllocator::used() const { 468 size_t PersistentMemoryAllocator::used() const {
458 return std::min(shared_meta()->freeptr.load(std::memory_order_relaxed), 469 return std::min(shared_meta()->freeptr.load(std::memory_order_relaxed),
459 mem_size_); 470 mem_size_);
460 } 471 }
461 472
462 PersistentMemoryAllocator::Reference PersistentMemoryAllocator::GetAsReference( 473 PersistentMemoryAllocator::Reference PersistentMemoryAllocator::GetAsReference(
463 const void* memory, 474 const void* memory,
(...skipping 181 matching lines...) Expand 10 before | Expand all | Expand 10 after
645 block->type_id.store(type_id, std::memory_order_relaxed); 656 block->type_id.store(type_id, std::memory_order_relaxed);
646 return freeptr; 657 return freeptr;
647 } 658 }
648 } 659 }
649 660
650 void PersistentMemoryAllocator::GetMemoryInfo(MemoryInfo* meminfo) const { 661 void PersistentMemoryAllocator::GetMemoryInfo(MemoryInfo* meminfo) const {
651 uint32_t remaining = std::max( 662 uint32_t remaining = std::max(
652 mem_size_ - shared_meta()->freeptr.load(std::memory_order_relaxed), 663 mem_size_ - shared_meta()->freeptr.load(std::memory_order_relaxed),
653 (uint32_t)sizeof(BlockHeader)); 664 (uint32_t)sizeof(BlockHeader));
654 meminfo->total = mem_size_; 665 meminfo->total = mem_size_;
655 meminfo->free = IsCorrupt() ? 0 : remaining - sizeof(BlockHeader); 666 meminfo->free = remaining - sizeof(BlockHeader);
656 } 667 }
657 668
658 void PersistentMemoryAllocator::MakeIterable(Reference ref) { 669 void PersistentMemoryAllocator::MakeIterable(Reference ref) {
659 DCHECK(!readonly_); 670 DCHECK(!readonly_);
660 if (IsCorrupt()) 671 if (IsCorrupt())
661 return; 672 return;
662 volatile BlockHeader* block = GetBlock(ref, 0, 0, false, false); 673 volatile BlockHeader* block = GetBlock(ref, 0, 0, false, false);
663 if (!block) // invalid reference 674 if (!block) // invalid reference
664 return; 675 return;
665 if (block->next.load(std::memory_order_acquire) != 0) // Already iterable. 676 if (block->next.load(std::memory_order_acquire) != 0) // Already iterable.
(...skipping 47 matching lines...) Expand 10 before | Expand all | Expand 10 after
713 } 724 }
714 } 725 }
715 726
716 // The "corrupted" state is held both locally and globally (shared). The 727 // The "corrupted" state is held both locally and globally (shared). The
717 // shared flag can't be trusted since a malicious actor could overwrite it. 728 // shared flag can't be trusted since a malicious actor could overwrite it.
718 // Because corruption can be detected during read-only operations such as 729 // Because corruption can be detected during read-only operations such as
719 // iteration, this method may be called by other "const" methods. In this 730 // iteration, this method may be called by other "const" methods. In this
720 // case, it's safe to discard the constness and modify the local flag and 731 // case, it's safe to discard the constness and modify the local flag and
721 // maybe even the shared flag if the underlying data isn't actually read-only. 732 // maybe even the shared flag if the underlying data isn't actually read-only.
722 void PersistentMemoryAllocator::SetCorrupt() const { 733 void PersistentMemoryAllocator::SetCorrupt() const {
723 LOG(ERROR) << "Corruption detected in shared-memory segment."; 734 if (!corrupt_.load(std::memory_order_relaxed) &&
724 const_cast<std::atomic<bool>*>(&corrupt_)->store(true, 735 !CheckFlag(
725 std::memory_order_relaxed); 736 const_cast<volatile std::atomic<uint32_t>*>(&shared_meta()->flags),
737 kFlagCorrupt)) {
738 LOG(ERROR) << "Corruption detected in shared-memory segment.";
739 RecordError(kMemoryIsCorrupt);
740 }
741
742 corrupt_.store(true, std::memory_order_relaxed);
726 if (!readonly_) { 743 if (!readonly_) {
727 SetFlag(const_cast<volatile std::atomic<uint32_t>*>(&shared_meta()->flags), 744 SetFlag(const_cast<volatile std::atomic<uint32_t>*>(&shared_meta()->flags),
728 kFlagCorrupt); 745 kFlagCorrupt);
729 } 746 }
730 } 747 }
731 748
732 bool PersistentMemoryAllocator::IsCorrupt() const { 749 bool PersistentMemoryAllocator::IsCorrupt() const {
733 if (corrupt_.load(std::memory_order_relaxed) || 750 if (corrupt_.load(std::memory_order_relaxed) ||
734 CheckFlag(&shared_meta()->flags, kFlagCorrupt)) { 751 CheckFlag(&shared_meta()->flags, kFlagCorrupt)) {
735 SetCorrupt(); // Make sure all indicators are set. 752 SetCorrupt(); // Make sure all indicators are set.
(...skipping 41 matching lines...) Expand 10 before | Expand all | Expand 10 after
777 if (type_id != 0 && 794 if (type_id != 0 &&
778 block->type_id.load(std::memory_order_relaxed) != type_id) { 795 block->type_id.load(std::memory_order_relaxed) != type_id) {
779 return nullptr; 796 return nullptr;
780 } 797 }
781 } 798 }
782 799
783 // Return pointer to block data. 800 // Return pointer to block data.
784 return reinterpret_cast<const volatile BlockHeader*>(mem_base_ + ref); 801 return reinterpret_cast<const volatile BlockHeader*>(mem_base_ + ref);
785 } 802 }
786 803
804 void PersistentMemoryAllocator::RecordError(int error) const {
805 if (errors_histogram_)
806 errors_histogram_->Add(error);
807 }
808
787 const volatile void* PersistentMemoryAllocator::GetBlockData( 809 const volatile void* PersistentMemoryAllocator::GetBlockData(
788 Reference ref, 810 Reference ref,
789 uint32_t type_id, 811 uint32_t type_id,
790 uint32_t size) const { 812 uint32_t size) const {
791 DCHECK(size > 0); 813 DCHECK(size > 0);
792 const volatile BlockHeader* block = 814 const volatile BlockHeader* block =
793 GetBlock(ref, type_id, size, false, false); 815 GetBlock(ref, type_id, size, false, false);
794 if (!block) 816 if (!block)
795 return nullptr; 817 return nullptr;
796 return reinterpret_cast<const volatile char*>(block) + sizeof(BlockHeader); 818 return reinterpret_cast<const volatile char*>(block) + sizeof(BlockHeader);
(...skipping 128 matching lines...) Expand 10 before | Expand all | Expand 10 after
925 947
926 // static 948 // static
927 bool FilePersistentMemoryAllocator::IsFileAcceptable( 949 bool FilePersistentMemoryAllocator::IsFileAcceptable(
928 const MemoryMappedFile& file, 950 const MemoryMappedFile& file,
929 bool read_only) { 951 bool read_only) {
930 return IsMemoryAcceptable(file.data(), file.length(), 0, read_only); 952 return IsMemoryAcceptable(file.data(), file.length(), 0, read_only);
931 } 953 }
932 #endif // !defined(OS_NACL) 954 #endif // !defined(OS_NACL)
933 955
934 } // namespace base 956 } // namespace base
OLDNEW
« no previous file with comments | « base/metrics/persistent_memory_allocator.h ('k') | tools/metrics/histograms/extract_histograms.py » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698