Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(516)

Unified Diff: components/metrics/persistent_system_profile.cc

Issue 2907543003: Support persistent system profiles. (Closed)
Patch Set: clean up; still not actually called Created 3 years, 7 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View side-by-side diff with in-line comments
Download patch
Index: components/metrics/persistent_system_profile.cc
diff --git a/components/metrics/persistent_system_profile.cc b/components/metrics/persistent_system_profile.cc
new file mode 100644
index 0000000000000000000000000000000000000000..9f7c327a48dac5e0d31c9407e819cc51b3b0ae54
--- /dev/null
+++ b/components/metrics/persistent_system_profile.cc
@@ -0,0 +1,290 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/persistent_system_profile.h"
+
+#include "base/atomicops.h"
+#include "base/metrics/persistent_memory_allocator.h"
+#include "base/stl_util.h"
+
+namespace metrics {
+
+namespace {
+
+// To provide atomic addition of records so that there is no confusion between
+// writers and readers, all of the metadata about a record is contained in a
+// structure that can be stored as a single atomic 32-bit word.
+union RecordHeader {
+ struct {
+ unsigned continued : 1; // Flag indicating if there is more after this.
+ unsigned type : 7; // The type of this record.
+ unsigned amount : 24; // The amount of data to follow.
+ } as_parts;
+ base::subtle::Atomic32 as_atomic;
+};
+
+// Type tag used for all segments allocated from the persistent memory
+// allocator by this file.
+constexpr uint32_t kTypeIdSystemProfile = 0x330A7150; // SHA1(SystemProfile)
+// Default size of a newly allocated segment; see AddSegment().
+constexpr size_t kSystemProfileAllocSize = 4 << 10; // 4 KiB
+// Upper bound on a single record fragment: the |amount| bit-field above is
+// 24 bits wide, and the rounded-up record (data + header) must still fit.
+constexpr size_t kMaxRecordSize = (1 << 24) - sizeof(RecordHeader);
+
+static_assert(sizeof(RecordHeader) == sizeof(base::subtle::Atomic32),
+ "bad RecordHeader size");
+
+// Calculate the size of a record based on the amount of data. This adds room
+// for the record header and rounds up to the next multiple of the record-header
+// size.
+size_t CalcRecordSize(size_t data_amount) {
Alexei Svitkine (slow) 2017/05/26 18:01:52 Nit: Calculate
bcwhite 2017/05/29 18:32:26 Done.
+ // Rounding trick: add (alignment - 1) then mask off the low bits. This is
+ // valid because sizeof(RecordHeader) is a power of two (static_assert above
+ // pins it to the 4-byte Atomic32).
+ return (data_amount + sizeof(RecordHeader) + sizeof(RecordHeader) - 1) &
+ ~(sizeof(RecordHeader) - 1);
+}
+
+} // namespace
+
+// Constructs a writable record allocator and immediately reserves an initial
+// segment of at least |min_size| bytes from |memory_allocator| so that the
+// first Write() has somewhere to go.
+PersistentSystemProfile::RecordAllocator::RecordAllocator(
+ base::PersistentMemoryAllocator* memory_allocator,
+ size_t min_size)
+ : allocator_(memory_allocator), tail_reference_(0), end_offset_(0) {
+ AddSegment(min_size);
+}
+
+// Constructs a read-only record allocator over an existing (const) memory
+// allocator. No segment is reserved here; Read() iterates existing ones.
+// NOTE(review): the const_cast exists only so both constructors can share the
+// single |allocator_| member — instances built through this path must never
+// be used for writing. Confirm against the header's usage contract.
+PersistentSystemProfile::RecordAllocator::RecordAllocator(
+ const base::PersistentMemoryAllocator* memory_allocator)
+ : allocator_(
+ const_cast<base::PersistentMemoryAllocator*>(memory_allocator)),
+ tail_reference_(0),
+ end_offset_(0) {}
+
+// Returns true if this record allocator wraps exactly |allocator| (pointer
+// identity, not content comparison).
+bool PersistentSystemProfile::RecordAllocator::Matches(
+ const base::PersistentMemoryAllocator* allocator) const {
+ return allocator_ == allocator;
+}
+
+// Logically erases all stored records: zeroing the leading RecordHeader word
+// of each segment marks it "empty" to readers, then the write position is
+// rewound to the start. The segments themselves stay allocated for reuse.
+void PersistentSystemProfile::RecordAllocator::Reset() {
+ // Clear the first word of all blocks so they're known to be "empty".
+ // Setting |tail_reference_| to 0 first makes NextSegment() restart its
+ // iteration from the first segment.
+ tail_reference_ = 0;
+ while (NextSegment()) {
+ base::subtle::Atomic32* block =
+ allocator_->GetAsArray<base::subtle::Atomic32>(tail_reference_,
+ kTypeIdSystemProfile, 1)<
+ DCHECK(block);
+ base::subtle::NoBarrier_Store(block, 0);
+ }
+
+ // Reset member variables.
+ tail_reference_ = 0;
+ end_offset_ = 0;
+}
+
+// Appends |record| with the given |type|, splitting it across multiple
+// header-prefixed fragments (and multiple segments) as needed. Returns false
+// only if a needed segment could not be allocated.
+bool PersistentSystemProfile::RecordAllocator::Write(
+    RecordType type,
+    const std::string& record) {
+  const char* data = record.data();
+  size_t remaining_size = record.size();
Alexei Svitkine (slow) 2017/05/26 18:01:52 Nit: remaining_size
bcwhite 2017/05/29 18:32:26 Done.
+
+  // A header carrying the record type; |amount| and |continued| get filled
+  // in per-fragment inside the loop below.
+  RecordHeader header;
+  header.as_atomic = 0;
+  header.as_parts.type = type;
+
+  // Make sure there is a place to write. Failure will cause a nullptr |block|
+  // when accessed below and will then return false.
+  if (tail_reference_ == 0)
+    AddSegment(remaining_size);
+
+  // Keep emitting fragments until all of the record's bytes are stored.
+  do {
+    char* block =
+        allocator_->GetAsArray<char>(tail_reference_, kTypeIdSystemProfile,
+                                     base::PersistentMemoryAllocator::kSizeAny);
+    if (!block)
+      return false;
+
+    // The size of the allocation limits how big a fragment can be stored.
+    // The mask rounds the reported size down to a whole number of headers.
+    const size_t alloc_size =
+        allocator_->GetAllocSize(tail_reference_) & ~(sizeof(RecordHeader) - 1);
Alexei Svitkine (slow) 2017/05/26 18:01:52 This is very cryptic. You get the size as size_t.
bcwhite 2017/05/29 18:32:26 The tilde (~) before it flips the bits so it's cle
+    size_t size_to_write = std::min(remaining_size, kMaxRecordSize);
+    size_to_write =
+        std::min(size_to_write, alloc_size - end_offset_ - sizeof(header));
Alexei Svitkine (slow) 2017/05/26 18:01:52 Nit: size_to_write instead of amount
bcwhite 2017/05/29 18:32:26 Done.
+
+    // Fill in the per-fragment header fields and claim the space.
+    header.as_parts.amount = size_to_write;
+    header.as_parts.continued = (size_to_write < remaining_size);
+    const size_t fragment_offset = end_offset_;
+    end_offset_ += CalcRecordSize(size_to_write);
+    DCHECK_GE(alloc_size, end_offset_);
+    if (end_offset_ < alloc_size) {
+      // An empty record header has to be next before this one gets written,
+      // so a reader never sees stale bits past the live records.
+      base::subtle::NoBarrier_Store(
+          reinterpret_cast<base::subtle::Atomic32*>(block + end_offset_), 0);
+    }
+    // Data first, then the header with release semantics: a reader that
+    // acquires the header is guaranteed to see the data bytes.
+    memcpy(block + fragment_offset + sizeof(header), data, size_to_write);
Alexei Svitkine (slow) 2017/05/26 18:01:52 I feel like this code would be clearer with anothe
bcwhite 2017/05/29 18:32:26 Done.
+    base::subtle::Release_Store(
+        reinterpret_cast<base::subtle::Atomic32*>(block), header.as_atomic);
+
+    // Advance past what was stored; any leftover becomes follow-on fragments.
+    data += size_to_write;
+    remaining_size -= size_to_write;
+
+    // If the block got filled, create a new one after the current one.
+    // Failure will cause a nullptr |block| when next accessed.
+    if (end_offset_ == alloc_size)
+      AddSegment(remaining_size);
+  } while (remaining_size > 0);
+
+  return true;
+}
+
+// Reads the next record, reassembling it from continued fragments across
+// segments. Returns false when there is nothing (more) to read or the stored
+// data is invalid; on success stores the record's type in |*type| and its
+// reassembled bytes in |*record|.
+bool PersistentSystemProfile::RecordAllocator::Read(RecordType* type,
+                                                    std::string* record) const {
+  RecordType found_type = kUnusedSpace;
+  record->clear();
+
+  // Find something to read. Failure will cause a nullptr |block| when accessed
+  // below and will then return false.
+  if (tail_reference_ == 0)
+    NextSegment();
+
+  bool continued;
+  do {
+    char* block =
+        allocator_->GetAsArray<char>(tail_reference_, kTypeIdSystemProfile,
+                                     base::PersistentMemoryAllocator::kSizeAny);
+    if (!block)
+      return false;  // Can't read anything more.
+
+    // Get the actual alloc size to verify that record doesn't extend past it.
+    const size_t alloc_size =
+        allocator_->GetAllocSize(tail_reference_) & ~(sizeof(RecordHeader) - 1);
+    DCHECK_GT(alloc_size, end_offset_);
+
+    // Get and validate the record header. Acquire semantics pair with the
+    // Release_Store in Write() so the data bytes are visible once the header
+    // is.
+    RecordHeader header;
+    header.as_atomic = base::subtle::Acquire_Load(
+        reinterpret_cast<base::subtle::Atomic32*>(block + end_offset_));
+    continued = !!header.as_parts.continued;
+    if (header.as_parts.type == kUnusedSpace) {
Alexei Svitkine (slow) 2017/05/26 18:01:52 I don't see where kUnusedSpace is written? Is it m
bcwhite 2017/05/29 18:32:26 Done.
+      return false;  // End of records.
+    } else if (found_type == kUnusedSpace) {
+      found_type = static_cast<RecordType>(header.as_parts.type);
+    } else if (found_type != header.as_parts.type) {
+      NOTREACHED();  // Continuation didn't match start of record.
+      return false;
+    }
+    size_t amount = header.as_parts.amount;
+    // BUG FIX: only require that the fragment fits inside the allocation.
+    // The previous |amount < sizeof(header)| test wrongly rejected records
+    // shorter than one header (Write() legitimately emits amounts of 0-3
+    // bytes, e.g. when a record tail barely fits a segment).
+    if (end_offset_ + sizeof(header) + amount > alloc_size) {
+      NOTREACHED();  // Invalid header amount.
+      return false;
+    }
+
+    // Append the record data to the output string.
+    // BUG FIX: the fragment's data lives just after its header at
+    // |end_offset_| (mirroring Write()'s memcpy to |offset| + header); the
+    // previous code read from the start of the block regardless of offset,
+    // returning wrong bytes for any record not at offset 0.
+    record->append(block + end_offset_ + sizeof(header), amount);
+    end_offset_ += CalcRecordSize(amount);
+    DCHECK_GE(alloc_size, end_offset_);
+
+    // If the end of the block has been reached, advance to the next one.
+    // Failure will cause a nullptr |block| when next accessed.
+    if (end_offset_ == alloc_size)
+      NextSegment();
+  } while (continued);
+
+  *type = found_type;
+  return true;
+}
+
+// Advances |tail_reference_| to the next segment of our type (starting from
+// the first segment when it is currently 0) and rewinds |end_offset_| to the
+// segment's beginning. Returns false when no further segment exists.
+// NOTE(review): this const method assigns |tail_reference_| and |end_offset_|,
+// so those members are presumably declared mutable in the header — confirm.
+bool PersistentSystemProfile::RecordAllocator::NextSegment() const {
+ base::PersistentMemoryAllocator::Iterator iter(allocator_, tail_reference_);
+ tail_reference_ = iter.GetNextOfType(kTypeIdSystemProfile);
+ end_offset_ = 0;
+ return tail_reference_ != 0;
+}
+
+// Moves to the next existing segment if one is available (reusing segments
+// cleared by Reset()); otherwise allocates a fresh iterable segment large
+// enough for |min_size| bytes of record data. Returns false only when a new
+// segment is needed and the underlying allocator is out of space.
+bool PersistentSystemProfile::RecordAllocator::AddSegment(size_t min_size) {
+ if (NextSegment()) {
+ // The first record-header should have been zeroed as part of the allocation
+ // or by the "reset" procedure.
+ DCHECK_EQ(0, base::subtle::NoBarrier_Load(
+ allocator_->GetAsArray<base::subtle::Atomic32>(
+ tail_reference_, kTypeIdSystemProfile, 1)));
+ return true;
+ }
+
+ DCHECK_EQ(0U, tail_reference_);
+ DCHECK_EQ(0U, end_offset_);
+
+ // Round up to the standard segment size, but never smaller than the record
+ // (plus its header) that triggered this allocation.
+ size_t size = std::max(CalcRecordSize(min_size), kSystemProfileAllocSize);
+
+ uint32_t ref = allocator_->Allocate(size, kTypeIdSystemProfile);
+ if (!ref)
+ return false; // Allocator must be full.
+ // Making the segment iterable is what lets NextSegment() find it later.
+ allocator_->MakeIterable(ref);
+
+ tail_reference_ = ref;
+ return true;
+}
+
+// Trivial construction/destruction; the registered memory allocators are
+// referenced, not owned, by the RecordAllocator entries in |allocators_|.
+PersistentSystemProfile::PersistentSystemProfile() {}
+
+PersistentSystemProfile::~PersistentSystemProfile() {}
+
+// Registers |memory_allocator| as a destination for future profile writes.
+// Must be called on the owning thread (enforced by |thread_checker_|).
+void PersistentSystemProfile::RegisterPersistentAllocator(
+ base::PersistentMemoryAllocator* memory_allocator) {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+
+ // Create and store the allocator. A |min_size| of "1" ensures that a memory
+ // block is reserved now.
+ RecordAllocator allocator(memory_allocator, 1);
+ allocators_.push_back(std::move(allocator));
+}
+
+// Removes the previously-registered |memory_allocator| from the fan-out list.
+// Deregistering an allocator that was never registered is a caller bug.
+void PersistentSystemProfile::DeregisterPersistentAllocator(
+    base::PersistentMemoryAllocator* memory_allocator) {
+  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+
+  // A linear scan suffices here: deregistration is expected to be rare (if it
+  // happens at all), so a map-like structure isn't worth the overhead.
+  auto it = allocators_.begin();
+  while (it != allocators_.end() && !it->Matches(memory_allocator))
+    ++it;
Alexei Svitkine (slow) 2017/05/26 18:01:52 Nit: Use EraseIf() from base/stl_util.h?
bcwhite 2017/05/29 18:32:26 Done.
+  if (it == allocators_.end()) {
+    NOTREACHED();
+    return;
+  }
+  allocators_.erase(it);
+}
+
+// Serializes |system_profile| once and fans the same bytes out to every
+// registered allocator, replacing whatever each one previously stored.
+void PersistentSystemProfile::SetSystemProfile(
+    const SystemProfileProto& system_profile) {
+  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+
+  std::string serialized;
+  if (!system_profile.SerializeToString(&serialized)) {
+    NOTREACHED();
+    return;
+  }
+
+  for (auto& allocator : allocators_) {
+    // A full system profile always starts fresh: wipe the existing records
+    // before writing out the new serialized profile.
+    allocator.Reset();
+    allocator.Write(kSystemProfileProto, serialized);
+  }
+}
+
+// static
+// Retrieves the system profile stored in |memory_allocator|, if any.
+// Returns true and fills |*system_profile| on success; returns false when no
+// record is stored, the first record is not a profile, or parsing fails.
+bool PersistentSystemProfile::GetSystemProfile(
+    SystemProfileProto* system_profile,
+    const base::PersistentMemoryAllocator* memory_allocator) {
+  // A read-only wrapper iterates the records already present in the memory.
+  const RecordAllocator records(memory_allocator);
+
+  RecordType type;
+  std::string record;
+  if (!records.Read(&type, &record))
+    return false;
+  if (type != kSystemProfileProto)
+    return false;
+
+  // BUG FIX: propagate parse failures instead of unconditionally returning
+  // true, which reported success even for a corrupt/truncated record.
+  return system_profile->ParseFromString(record);
Alexei Svitkine (slow) 2017/05/26 18:01:52 Return the result of system_profile->ParseFromStri
bcwhite 2017/05/29 18:32:26 Done.
+}
+
+} // namespace metrics

Powered by Google App Engine
This is Rietveld 408576698