Chromium Code Reviews

// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "components/metrics/persistent_system_profile.h"

#include <string.h>

#include <algorithm>
#include <utility>

#include "base/atomicops.h"
#include "base/logging.h"
#include "base/memory/singleton.h"
#include "base/metrics/persistent_memory_allocator.h"
#include "base/stl_util.h"

namespace metrics {

namespace {

// To provide atomic addition of records so that there is no confusion between
// writers and readers, all of the metadata about a record is contained in a
// structure that can be stored as a single atomic 32-bit word.
union RecordHeader {
  struct {
    unsigned continued : 1;  // Flag indicating if there is more after this.
    unsigned type : 7;       // The type of this record.
    unsigned amount : 24;    // The amount of data to follow.
  } as_parts;
  base::subtle::Atomic32 as_atomic;
};

constexpr uint32_t kTypeIdSystemProfile = 0x330A7150;  // SHA1(SystemProfile)
constexpr size_t kSystemProfileAllocSize = 4 << 10;  // 4 KiB
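// A single record chunk is limited by the 24-bit |amount| field to just under
// 16 MiB; larger payloads are split across multiple records chained together
// by the |continued| flag.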
constexpr size_t kMaxRecordSize = (1 << 24) - sizeof(RecordHeader);

static_assert(sizeof(RecordHeader) == sizeof(base::subtle::Atomic32),
              "bad RecordHeader size");

// Calculate the size of a record based on the amount of data. This adds room
// for the record header and rounds up to the next multiple of the
// record-header size.
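// For example, with a 4-byte header, CalculateRecordSize(10) == 16 and
// CalculateRecordSize(4) == 8.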
size_t CalculateRecordSize(size_t data_amount) {
  return (data_amount + sizeof(RecordHeader) + sizeof(RecordHeader) - 1) &
         ~(sizeof(RecordHeader) - 1);
}

}  // namespace

PersistentSystemProfile::RecordAllocator::RecordAllocator(
    base::PersistentMemoryAllocator* memory_allocator,
    size_t min_size)
    : allocator_(memory_allocator),
      alloc_reference_(0),
      alloc_size_(0),
      end_offset_(0) {
  AddSegment(min_size);
}

PersistentSystemProfile::RecordAllocator::RecordAllocator(
    const base::PersistentMemoryAllocator* memory_allocator)
    : allocator_(
          const_cast<base::PersistentMemoryAllocator*>(memory_allocator)),
      alloc_reference_(0),
      alloc_size_(0),
      end_offset_(0) {}

void PersistentSystemProfile::RecordAllocator::Reset() {
  // Clear the first word of all blocks so they're known to be "empty".
  alloc_reference_ = 0;
  while (NextSegment()) {
    // Get the block as a char* and cast it. It can't be fetched directly as
    // an array of RecordHeader because that's not a fundamental type and only
    // arrays of fundamental types are allowed.
    RecordHeader* header =
        reinterpret_cast<RecordHeader*>(allocator_->GetAsArray<char>(
            alloc_reference_, kTypeIdSystemProfile, sizeof(RecordHeader)));
    DCHECK(header);
    base::subtle::NoBarrier_Store(&header->as_atomic, 0);
  }

  // Reset member variables.
  alloc_reference_ = 0;
  alloc_size_ = 0;
  end_offset_ = 0;
}

bool PersistentSystemProfile::RecordAllocator::Write(
    RecordType type,
    const std::string& record) {
  const char* data = record.data();
  size_t remaining_size = record.size();

  // Allocate space and write records until everything has been stored.
  do {
    if (end_offset_ == alloc_size_) {
      if (!AddSegment(remaining_size))
        return false;
    }
    // Write out as much of the data as possible. |data| and |remaining_size|
    // are updated in place.
    if (!WriteData(type, &data, &remaining_size))
      return false;
  } while (remaining_size > 0);

  return true;
}

bool PersistentSystemProfile::RecordAllocator::Read(RecordType* type,
                                                    std::string* record) const {
  *type = kUnusedSpace;
  record->clear();

  // Access data and read records until everything has been loaded.
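  // ReadData() returns false while a record continues in a following chunk;
  // keep looping, advancing to the next segment when necessary, until a
  // complete record or unused space is found.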
  while (true) {
    if (end_offset_ == alloc_size_) {
      if (!NextSegment())
        return false;
    }
    if (ReadData(type, record))
      return *type != kUnusedSpace;
  }
}

bool PersistentSystemProfile::RecordAllocator::NextSegment() const {
  base::PersistentMemoryAllocator::Iterator iter(allocator_, alloc_reference_);
  alloc_reference_ = iter.GetNextOfType(kTypeIdSystemProfile);
  alloc_size_ = allocator_->GetAllocSize(alloc_reference_);
  end_offset_ = 0;
  return alloc_reference_ != 0;
}

bool PersistentSystemProfile::RecordAllocator::AddSegment(size_t min_size) {
  if (NextSegment()) {
    // The first record-header should have been zeroed as part of the
    // allocation or by the "reset" procedure.
    DCHECK_EQ(0, base::subtle::NoBarrier_Load(
                     allocator_->GetAsArray<base::subtle::Atomic32>(
                         alloc_reference_, kTypeIdSystemProfile, 1)));
    return true;
  }

  DCHECK_EQ(0U, alloc_reference_);
  DCHECK_EQ(0U, end_offset_);

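  // Reserve at least the default segment size, but request more if a single
  // record chunk needs it.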
  size_t size =
      std::max(CalculateRecordSize(min_size), kSystemProfileAllocSize);

  uint32_t ref = allocator_->Allocate(size, kTypeIdSystemProfile);
  if (!ref)
    return false;  // Allocator must be full.
  allocator_->MakeIterable(ref);

  alloc_reference_ = ref;
  alloc_size_ = allocator_->GetAllocSize(ref);
  return true;
}

bool PersistentSystemProfile::RecordAllocator::WriteData(
    RecordType type,
    const char** data,
    size_t* remaining_size) {
  char* block =
      allocator_->GetAsArray<char>(alloc_reference_, kTypeIdSystemProfile,
                                   base::PersistentMemoryAllocator::kSizeAny);
  if (!block)
    return false;  // It's bad if there is no accessible block.

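  // A single chunk is limited both by the 24-bit |amount| field and by the
  // space left in this segment after this record's own header.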
  size_t write_size = std::min(*remaining_size, kMaxRecordSize);
  write_size =
      std::min(write_size, alloc_size_ - end_offset_ - sizeof(RecordHeader));

  // Write the data and the record header.
  RecordHeader header;
  header.as_atomic = 0;
  header.as_parts.type = type;
  header.as_parts.amount = write_size;
  header.as_parts.continued = (write_size < *remaining_size);
  size_t offset = end_offset_;
  end_offset_ += CalculateRecordSize(write_size);
  DCHECK_GE(alloc_size_, end_offset_);
  if (end_offset_ < alloc_size_) {
    // An empty header must already be in place at the next record position
    // before this record is published.
    base::subtle::NoBarrier_Store(
        reinterpret_cast<base::subtle::Atomic32*>(block + end_offset_), 0);
  }
  memcpy(block + offset + sizeof(header), *data, write_size);
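  // Publish the header last with a release store so that a concurrent
  // reader's acquire load in ReadData() never observes this header before the
  // record data and the zeroed follow-on header written above.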
  base::subtle::Release_Store(
      reinterpret_cast<base::subtle::Atomic32*>(block + offset),
      header.as_atomic);

  // Account for what was stored and prepare for follow-on records with any
  // remaining data.
  *data += write_size;
  *remaining_size -= write_size;

  return true;
}

bool PersistentSystemProfile::RecordAllocator::ReadData(
    RecordType* type,
    std::string* record) const {
  DCHECK_GT(alloc_size_, end_offset_);

  char* block =
      allocator_->GetAsArray<char>(alloc_reference_, kTypeIdSystemProfile,
                                   base::PersistentMemoryAllocator::kSizeAny);
  if (!block) {
    *type = kUnusedSpace;
    return true;  // No more data.
  }

  // Get and validate the record header.
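  // This acquire load pairs with the release store in WriteData(): once the
  // header is visible, the record data it describes is visible as well.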
  RecordHeader header;
  header.as_atomic = base::subtle::Acquire_Load(
      reinterpret_cast<base::subtle::Atomic32*>(block + end_offset_));
  bool continued = !!header.as_parts.continued;
  if (header.as_parts.type == kUnusedSpace) {
    *type = kUnusedSpace;
    return true;  // End of all records.
  } else if (*type == kUnusedSpace) {
    *type = static_cast<RecordType>(header.as_parts.type);
  } else if (*type != header.as_parts.type) {
    NOTREACHED();  // Continuation didn't match start of record.
    *type = kUnusedSpace;
    record->clear();
    return false;
  }
  size_t read_size = header.as_parts.amount;
  if (read_size < sizeof(header) ||
      end_offset_ + sizeof(header) + read_size > alloc_size_) {
    NOTREACHED();  // Invalid header amount.
    *type = kUnusedSpace;
    return true;  // Don't try again.
  }

  // Append the record data to the output string.
  record->append(block + end_offset_ + sizeof(header), read_size);
  end_offset_ += CalculateRecordSize(read_size);
  DCHECK_GE(alloc_size_, end_offset_);

  return !continued;
}

PersistentSystemProfile::PersistentSystemProfile() {}

PersistentSystemProfile::~PersistentSystemProfile() {}

void PersistentSystemProfile::RegisterPersistentAllocator(
    base::PersistentMemoryAllocator* memory_allocator) {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);

  // Create and store the allocator. A |min_size| of "1" ensures that a memory
  // block is reserved now.
  RecordAllocator allocator(memory_allocator, 1);
  allocators_.push_back(std::move(allocator));
}

void PersistentSystemProfile::DeregisterPersistentAllocator(
    base::PersistentMemoryAllocator* memory_allocator) {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);

  // This would be more efficient with a std::map but it's not expected that
  // allocators will get deregistered with any frequency, if at all.
  base::EraseIf(allocators_, [=](RecordAllocator& records) {
    return records.allocator() == memory_allocator;
  });
}

void PersistentSystemProfile::SetSystemProfile(
    const std::string& serialized_profile) {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);

  if (allocators_.empty() || serialized_profile.empty())

Alexei Svitkine (slow)  2017/05/29 22:27:24
If serialized_profile is empty, I think this shoul…

bcwhite  2017/05/30 01:19:12
The string can be empty if there is a serializatio…

    return;

  for (auto& allocator : allocators_) {
    // A full system profile always starts fresh.
    allocator.Reset();
    // Write out the serialized profile.
    allocator.Write(kSystemProfileProto, serialized_profile);
  }
}

// static
bool PersistentSystemProfile::GetSystemProfile(
    const base::PersistentMemoryAllocator& memory_allocator,
    SystemProfileProto* system_profile) {
  const RecordAllocator records(&memory_allocator);

  RecordType type;
  std::string record;
  if (!records.Read(&type, &record))
    return false;
  if (type != kSystemProfileProto)
    return false;

  return system_profile->ParseFromString(record);
}

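// static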
GlobalPersistentSystemProfile* GlobalPersistentSystemProfile::GetInstance() {
  return base::Singleton<
      GlobalPersistentSystemProfile,
      base::LeakySingletonTraits<GlobalPersistentSystemProfile>>::get();
}

}  // namespace metrics