Chromium Code Reviews
| 1 // Copyright 2017 The Chromium Authors. All rights reserved. | |
| 2 // Use of this source code is governed by a BSD-style license that can be | |
| 3 // found in the LICENSE file. | |
| 4 | |
| 5 #include "components/metrics/persistent_system_profile.h" | |
| 6 | |
| 7 #include "base/atomicops.h" | |
| 8 #include "base/metrics/persistent_memory_allocator.h" | |
| 9 #include "base/stl_util.h" | |
| 10 | |
| 11 namespace metrics { | |
| 12 | |
| 13 namespace { | |
| 14 | |
| 15 // To provide atomic addition of records so that there is no confusion between | |
| 16 // writers and readers, all of the metadata about a record is contained in a | |
| 17 // structure that can be stored as a single atomic 32-bit word. | |
| 18 union RecordHeader { | |
| 19 struct { | |
| 20 unsigned continued : 1; // Flag indicating if there is more after this. | |
| 21 unsigned type : 7; // The type of this record. | |
| 22 unsigned amount : 24; // The amount of data to follow. | |
| 23 } as_parts; | |
| 24 base::subtle::Atomic32 as_atomic; | |
| 25 }; | |
| 26 | |
| 27 constexpr uint32_t kTypeIdSystemProfile = 0x330A7150; // SHA1(SystemProfile) | |
| 28 constexpr size_t kSystemProfileAllocSize = 4 << 10; // 4 KiB | |
| 29 constexpr size_t kMaxRecordSize = (1 << 24) - sizeof(RecordHeader); | |
| 30 | |
| 31 static_assert(sizeof(RecordHeader) == sizeof(base::subtle::Atomic32), | |
| 32 "bad RecordHeader size"); | |
| 33 | |
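The comment above describes a publish/consume pattern: a writer fills in the bit-field parts and then stores the whole 32-bit word at once, so a reader can never observe a half-written header. A minimal standalone sketch of the same idea, using std::atomic as a stand-in for base::subtle (illustration only, not part of this CL):

    #include <atomic>
    #include <cstdint>

    // Same bit layout as RecordHeader above; the union lets all three fields
    // travel through a single 32-bit word.
    union Header {
      struct {
        unsigned continued : 1;
        unsigned type : 7;
        unsigned amount : 24;
      } as_parts;
      uint32_t as_word;
    };

    int main() {
      std::atomic<uint32_t> slot{0};

      // Writer: compose the header, then publish it with one store.
      Header h;
      h.as_word = 0;
      h.as_parts.type = 1;       // hypothetical record type
      h.as_parts.amount = 1000;  // bytes of data that follow the header
      slot.store(h.as_word, std::memory_order_release);

      // Reader: one load yields a consistent {continued, type, amount} triple.
      Header r;
      r.as_word = slot.load(std::memory_order_acquire);
      return r.as_parts.amount == 1000 ? 0 : 1;
    }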
| 34 // Calculate the size of a record based on the amount of data. This adds room | |
| 35 // for the record header and rounds up to the next multiple of the record-header | |
| 36 // size. | |
| 37 size_t CalcRecordSize(size_t data_amount) { | |
|
Alexei Svitkine (slow)
2017/05/26 18:01:52
Nit: Calculate
bcwhite
2017/05/29 18:32:26
Done.
| |
| 38 return (data_amount + sizeof(RecordHeader) + sizeof(RecordHeader) - 1) & | |
| 39 ~(sizeof(RecordHeader) - 1); | |
| 40 } | |
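A quick worked example of the rounding arithmetic, assuming sizeof(RecordHeader) is 4 bytes (the static_assert above pins it to sizeof(Atomic32)):

    // Round up to a header-size multiple, including room for the header itself:
    //   CalcRecordSize(10) == (10 + 4 + 3) & ~3 == 17 & ~3 == 16
    //   CalcRecordSize(16) == (16 + 4 + 3) & ~3 == 23 & ~3 == 20
    //
    // The related expression used later in Write() and Read(),
    //   GetAllocSize(ref) & ~(sizeof(RecordHeader) - 1),
    // rounds *down* instead (e.g. 4098 & ~3 == 4096), so offsets within a
    // block always stay aligned to the header size.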
| 41 | |
| 42 } // namespace | |
| 43 | |
| 44 PersistentSystemProfile::RecordAllocator::RecordAllocator( | |
| 45 base::PersistentMemoryAllocator* memory_allocator, | |
| 46 size_t min_size) | |
| 47 : allocator_(memory_allocator), tail_reference_(0), end_offset_(0) { | |
| 48 AddSegment(min_size); | |
| 49 } | |
| 50 | |
| 51 PersistentSystemProfile::RecordAllocator::RecordAllocator( | |
| 52 const base::PersistentMemoryAllocator* memory_allocator) | |
| 53 : allocator_( | |
| 54 const_cast<base::PersistentMemoryAllocator*>(memory_allocator)), | |
| 55 tail_reference_(0), | |
| 56 end_offset_(0) {} | |
| 57 | |
| 58 bool PersistentSystemProfile::RecordAllocator::Matches( | |
| 59 const base::PersistentMemoryAllocator* allocator) const { | |
| 60 return allocator_ == allocator; | |
| 61 } | |
| 62 | |
| 63 void PersistentSystemProfile::RecordAllocator::Reset() { | |
| 64 // Clear the first word of all blocks so they're known to be "empty". | |
| 65 tail_reference_ = 0; | |
| 66 while (NextSegment()) { | |
| 67 base::subtle::Atomic32* block = | |
| 68 allocator_->GetAsArray<base::subtle::Atomic32>(tail_reference_, | |
| 69 kTypeIdSystemProfile, 1); | |
| 70 DCHECK(block); | |
| 71 base::subtle::NoBarrier_Store(block, 0); | |
| 72 } | |
| 73 | |
| 74 // Reset member variables. | |
| 75 tail_reference_ = 0; | |
| 76 end_offset_ = 0; | |
| 77 } | |
| 78 | |
| 79 bool PersistentSystemProfile::RecordAllocator::Write( | |
| 80 RecordType type, | |
| 81 const std::string& record) { | |
| 82 const char* data = record.data(); | |
| 83 size_t size = record.size(); | |
|
Alexei Svitkine (slow)
2017/05/26 18:01:52
Nit: remaining_size
bcwhite
2017/05/29 18:32:26
Done.
| |
| 84 | |
| 85 RecordHeader header; | |
| 86 header.as_atomic = 0; | |
| 87 header.as_parts.type = type; | |
| 88 | |
| 89 // Make sure there is a place to write. Failure will cause a nullptr |block| | |
| 90 // when accessed below and will then return false. | |
| 91 if (tail_reference_ == 0) | |
| 92 AddSegment(size); | |
| 93 | |
| 94 // Write records until everything has been stored. | |
| 95 do { | |
| 96 char* block = | |
| 97 allocator_->GetAsArray<char>(tail_reference_, kTypeIdSystemProfile, | |
| 98 base::PersistentMemoryAllocator::kSizeAny); | |
| 99 if (!block) | |
| 100 return false; | |
| 101 | |
| 102 // The size of the allocation limits how big a record can be stored. | |
| 103 const size_t alloc_size = | |
| 104 allocator_->GetAllocSize(tail_reference_) & ~(sizeof(RecordHeader) - 1); | |
|
Alexei Svitkine (slow)
2017/05/26 18:01:52
This is very cryptic.
You get the size as size_t.
bcwhite
2017/05/29 18:32:26
The tilde (~) before it flips the bits so it's cle
| |
| 105 size_t amount = std::min(size, kMaxRecordSize); | |
| 106 amount = std::min(amount, alloc_size - end_offset_ - sizeof(header)); | |
|
Alexei Svitkine (slow)
2017/05/26 18:01:52
Nit: size_to_write instead of amount
bcwhite
2017/05/29 18:32:26
Done.
| |
| 107 | |
| 108 // Write the data and the record header. | |
| 109 header.as_parts.amount = amount; | |
| 110 header.as_parts.continued = (amount < size); | |
| 111 size_t offset = end_offset_; | |
| 112 end_offset_ += CalcRecordSize(amount); | |
| 113 DCHECK_GE(alloc_size, end_offset_); | |
| 114 if (end_offset_ < alloc_size) { | |
| 115 // The next record's header must be zeroed before this one is published. | |
| 116 base::subtle::NoBarrier_Store( | |
| 117 reinterpret_cast<base::subtle::Atomic32*>(block + end_offset_), 0); | |
| 118 } | |
| 119 memcpy(block + offset + sizeof(header), data, amount); | |
|
Alexei Svitkine (slow)
2017/05/26 18:01:52
I feel like this code would be clearer with anothe
bcwhite
2017/05/29 18:32:26
Done.
| |
| 120 base::subtle::Release_Store( | |
| 121 reinterpret_cast<base::subtle::Atomic32*>(block), header.as_atomic); | |
| 122 | |
| 123 // Account for what was stored and prepare for follow-on records with any | |
| 124 // remaining data. | |
| 125 data += amount; | |
| 126 size -= amount; | |
| 127 | |
| 128 // If the block got filled, create a new one after the current one. Failure | |
| 129 // will cause a nullptr |block| when next accessed. | |
| 130 if (end_offset_ == alloc_size) | |
| 131 AddSegment(size); | |
| 132 } while (size > 0); | |
| 133 | |
| 134 return true; | |
| 135 } | |
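To make the loop above concrete, here is a hypothetical trace of writing a 10,000-byte record into a fresh 4 KiB segment (sizes are illustrative; it assumes 4-byte headers and that GetAllocSize() returns exactly the requested sizes):

    // Pass 1: alloc_size = 4096, end_offset_ = 0
    //   amount      = min(10000, kMaxRecordSize, 4096 - 0 - 4) = 4092
    //   header      = {continued = 1, amount = 4092}; data copied at offset 4
    //   end_offset_ = CalcRecordSize(4092) = 4096 == alloc_size,
    //                 so AddSegment(5908) starts a new segment.
    //
    // Pass 2: new segment of max(CalcRecordSize(5908), 4096) = 5912 bytes
    //   amount      = min(5908, kMaxRecordSize, 5912 - 0 - 4) = 5908
    //   header      = {continued = 0, amount = 5908}; data copied at offset 4
    //   end_offset_ = CalcRecordSize(5908) = 5912, again equal to alloc_size,
    //                 so one more (still empty) 4 KiB segment is reserved
    //                 before the loop exits with nothing left to write.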
| 136 | |
| 137 bool PersistentSystemProfile::RecordAllocator::Read(RecordType* type, | |
| 138 std::string* record) const { | |
| 139 RecordType found_type = kUnusedSpace; | |
| 140 record->clear(); | |
| 141 | |
| 142 // Find something to read. Failure will cause a nullptr |block| when accessed | |
| 143 // below and will then return false. | |
| 144 if (tail_reference_ == 0) | |
| 145 NextSegment(); | |
| 146 | |
| 147 bool continued; | |
| 148 do { | |
| 149 char* block = | |
| 150 allocator_->GetAsArray<char>(tail_reference_, kTypeIdSystemProfile, | |
| 151 base::PersistentMemoryAllocator::kSizeAny); | |
| 152 if (!block) | |
| 153 return false; // Can't read anything more. | |
| 154 | |
| 155 // Get the actual alloc size to verify the record doesn't extend past it. | |
| 156 const size_t alloc_size = | |
| 157 allocator_->GetAllocSize(tail_reference_) & ~(sizeof(RecordHeader) - 1); | |
| 158 DCHECK_GT(alloc_size, end_offset_); | |
| 159 | |
| 160 // Get and validate the record header. | |
| 161 RecordHeader header; | |
| 162 header.as_atomic = base::subtle::Acquire_Load( | |
| 163 reinterpret_cast<base::subtle::Atomic32*>(block + end_offset_)); | |
| 164 continued = !!header.as_parts.continued; | |
| 165 if (header.as_parts.type == kUnusedSpace) { | |
|
Alexei Svitkine (slow)
2017/05/26 18:01:52
I don't see where kUnusedSpace is written? Is it m
bcwhite
2017/05/29 18:32:26
Done.
| |
| 166 return false; // End of records. | |
| 167 } else if (found_type == kUnusedSpace) { | |
| 168 found_type = static_cast<RecordType>(header.as_parts.type); | |
| 169 } else if (found_type != header.as_parts.type) { | |
| 170 NOTREACHED(); // Continuation didn't match start of record. | |
| 171 return false; | |
| 172 } | |
| 173 size_t amount = header.as_parts.amount; | |
| 174 if (end_offset_ + sizeof(header) + amount > alloc_size) { | |
| 176 NOTREACHED(); // Invalid header amount. | |
| 177 return false; | |
| 178 } | |
| 179 | |
| 180 // Append the record data to the output string. | |
| 181 record->append(block + end_offset_ + sizeof(header), amount); | |
| 182 end_offset_ += CalcRecordSize(amount); | |
| 183 DCHECK_GE(alloc_size, end_offset_); | |
| 184 | |
| 185 // If the end of the block has been reached, advance to the next one. | |
| 186 // Failure will cause a nullptr |block| when next accessed. | |
| 187 if (end_offset_ == alloc_size) | |
| 188 NextSegment(); | |
| 189 } while (continued); | |
| 190 | |
| 191 *type = found_type; | |
| 192 return true; | |
| 193 } | |
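Continuing that hypothetical write trace, reading the 10,000-byte record back proceeds roughly as follows:

    // Segment 1: header {continued = 1, amount = 4092} read with Acquire_Load;
    //            4092 bytes appended; end_offset_ reaches alloc_size, so
    //            NextSegment() advances to the second segment.
    // Segment 2: header {continued = 0, amount = 5908}; 5908 bytes appended;
    //            |continued| is now false, so the loop ends with a 10,000-byte
    //            record and *type set from the first chunk's header.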
| 194 | |
| 195 bool PersistentSystemProfile::RecordAllocator::NextSegment() const { | |
| 196 base::PersistentMemoryAllocator::Iterator iter(allocator_, tail_reference_); | |
| 197 tail_reference_ = iter.GetNextOfType(kTypeIdSystemProfile); | |
| 198 end_offset_ = 0; | |
| 199 return tail_reference_ != 0; | |
| 200 } | |
| 201 | |
| 202 bool PersistentSystemProfile::RecordAllocator::AddSegment(size_t min_size) { | |
| 203 if (NextSegment()) { | |
| 204 // The first record-header should have been zeroed as part of the allocation | |
| 205 // or by the "reset" procedure. | |
| 206 DCHECK_EQ(0, base::subtle::NoBarrier_Load( | |
| 207 allocator_->GetAsArray<base::subtle::Atomic32>( | |
| 208 tail_reference_, kTypeIdSystemProfile, 1))); | |
| 209 return true; | |
| 210 } | |
| 211 | |
| 212 DCHECK_EQ(0U, tail_reference_); | |
| 213 DCHECK_EQ(0U, end_offset_); | |
| 214 | |
| 215 size_t size = std::max(CalcRecordSize(min_size), kSystemProfileAllocSize); | |
| 216 | |
| 217 uint32_t ref = allocator_->Allocate(size, kTypeIdSystemProfile); | |
| 218 if (!ref) | |
| 219 return false; // Allocator must be full. | |
| 220 allocator_->MakeIterable(ref); | |
| 221 | |
| 222 tail_reference_ = ref; | |
| 223 return true; | |
| 224 } | |
| 225 | |
| 226 PersistentSystemProfile::PersistentSystemProfile() {} | |
| 227 | |
| 228 PersistentSystemProfile::~PersistentSystemProfile() {} | |
| 229 | |
| 230 void PersistentSystemProfile::RegisterPersistentAllocator( | |
| 231 base::PersistentMemoryAllocator* memory_allocator) { | |
| 232 DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); | |
| 233 | |
| 234 // Create and store the allocator. A |min_size| of "1" ensures that a memory | |
| 235 // block is reserved now. | |
| 236 RecordAllocator allocator(memory_allocator, 1); | |
| 237 allocators_.push_back(std::move(allocator)); | |
| 238 } | |
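For reference, a |min_size| of 1 still reserves a full block, because AddSegment() never allocates less than kSystemProfileAllocSize:

    //   AddSegment(1): std::max(CalcRecordSize(1), kSystemProfileAllocSize)
    //                = std::max((1 + 4 + 3) & ~3, 4096)
    //                = std::max(8, 4096) = 4096 bytes reserved up front.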
| 239 | |
| 240 void PersistentSystemProfile::DeregisterPersistentAllocator( | |
| 241 base::PersistentMemoryAllocator* memory_allocator) { | |
| 242 DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); | |
| 243 | |
| 244 // This would be more efficient with a std::map but it's not expected that | |
| 245 // allocators will get deregistered with any frequency, if at all. | |
| 246 for (auto iter = allocators_.begin(); iter != allocators_.end(); ++iter) { | |
| 247 if (iter->Matches(memory_allocator)) { | |
| 248 allocators_.erase(iter); | |
| 249 return; | |
| 250 } | |
| 251 } | |
|
Alexei Svitkine (slow)
2017/05/26 18:01:52
Nit: Use EraseIf() from base/stl_util.h?
bcwhite
2017/05/29 18:32:26
Done.
| |
| 252 NOTREACHED(); | |
| 253 } | |
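The "Done." above is not visible in this diff; a plausible sketch of the EraseIf() form the reviewer suggests (from base/stl_util.h), shown here with the NOTREACHED() check dropped for brevity, might look like:

    void PersistentSystemProfile::DeregisterPersistentAllocator(
        base::PersistentMemoryAllocator* memory_allocator) {
      DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);

      // Deregistration is rare, so a linear scan over the container is fine.
      base::EraseIf(allocators_, [memory_allocator](const RecordAllocator& a) {
        return a.Matches(memory_allocator);
      });
    }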
| 254 | |
| 255 void PersistentSystemProfile::SetSystemProfile( | |
| 256 const SystemProfileProto& system_profile) { | |
| 257 DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); | |
| 258 | |
| 259 std::string serialized_profile; | |
| 260 if (!system_profile.SerializeToString(&serialized_profile)) { | |
| 261 NOTREACHED(); | |
| 262 return; | |
| 263 } | |
| 264 | |
| 265 for (auto& allocator : allocators_) { | |
| 266 // A full system profile always starts fresh. | |
| 267 allocator.Reset(); | |
| 268 // Write out the serialized profile. | |
| 269 allocator.Write(kSystemProfileProto, serialized_profile); | |
| 270 } | |
| 271 } | |
| 272 | |
| 273 // static | |
| 274 bool PersistentSystemProfile::GetSystemProfile( | |
| 275 SystemProfileProto* system_profile, | |
| 276 const base::PersistentMemoryAllocator* memory_allocator) { | |
| 277 const RecordAllocator records(memory_allocator); | |
| 278 | |
| 279 RecordType type; | |
| 280 std::string record; | |
| 281 if (!records.Read(&type, &record)) | |
| 282 return false; | |
| 283 if (type != kSystemProfileProto) | |
| 284 return false; | |
| 285 | |
| 286 system_profile->ParseFromString(record); | |
| 287 return true; | |
|
Alexei Svitkine (slow)
2017/05/26 18:01:52
Return the result of system_profile->ParseFromString().
bcwhite
2017/05/29 18:32:26
Done.
| |
| 288 } | |
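Per the review comment above, the tail of GetSystemProfile() presumably becomes something like this in the next patch set (a sketch, not the actual follow-up):

      ...
      if (type != kSystemProfileProto)
        return false;

      // Propagate parse failures instead of always returning true.
      return system_profile->ParseFromString(record);
    }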
| 289 | |
| 290 } // namespace metrics | |
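Finally, a hypothetical end-to-end sketch of how the class might be exercised (test-style code, not from this CL; names and sizes are illustrative, and the usual metrics/proto includes are assumed):

    void DemoPersistentSystemProfile() {
      // 64 KiB of local memory standing in for a persistent file mapping.
      base::LocalPersistentMemoryAllocator memory(64 << 10, /*id=*/0,
                                                  "SystemProfileDemo");

      metrics::PersistentSystemProfile persistent_profile;
      persistent_profile.RegisterPersistentAllocator(&memory);

      metrics::SystemProfileProto profile;
      profile.set_application_locale("en-US");  // any field works for the demo
      persistent_profile.SetSystemProfile(profile);

      // Later, possibly from a reader that only has the raw memory region.
      metrics::SystemProfileProto recovered;
      bool ok = metrics::PersistentSystemProfile::GetSystemProfile(&recovered,
                                                                   &memory);
      DCHECK(ok);
      DCHECK_EQ("en-US", recovered.application_locale());
    }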