OLD | NEW |
---|---|
(Empty) | |
1 // Copyright 2017 The Chromium Authors. All rights reserved. | |
2 // Use of this source code is governed by a BSD-style license that can be | |
3 // found in the LICENSE file. | |
4 | |
5 #include "components/metrics/persistent_system_profile.h" | |
6 | |
7 #include "base/atomicops.h" | |
8 #include "base/memory/singleton.h" | |
9 #include "base/metrics/persistent_memory_allocator.h" | |
10 #include "base/stl_util.h" | |
11 | |
12 namespace metrics { | |
13 | |
namespace {

// To provide atomic addition of records so that there is no confusion between
// writers and readers, all of the metadata about a record is contained in a
// structure that can be stored as a single atomic 32-bit word.
union RecordHeader {
  struct {
    unsigned continued : 1;  // Flag indicating if there is more after this.
    unsigned type : 7;       // The type of this record.
    unsigned amount : 24;    // The amount of data to follow.
  } as_parts;
  base::subtle::Atomic32 as_atomic;  // The same bits, viewed as one atomic word.
};

// Type-id under which memory segments are allocated; readers iterate
// allocations of this type to find the profile data. (Per the original
// comment, the value derives from SHA1 of the name.)
constexpr uint32_t kTypeIdSystemProfile = 0x330A7150;  // SHA1(SystemProfile)
// Minimum size of each allocated memory segment.
constexpr size_t kSystemProfileAllocSize = 4 << 10;  // 4 KiB
// Largest data amount a single record can carry: the header's |amount|
// bit-field is 24 bits wide, so larger payloads must be split across records.
constexpr size_t kMaxRecordSize = (1 << 24) - sizeof(RecordHeader);

// Headers are stored/loaded through the |as_atomic| view, so the union must
// be exactly one atomic word in size.
static_assert(sizeof(RecordHeader) == sizeof(base::subtle::Atomic32),
              "bad RecordHeader size");

// Calculate the size of a record based on the amount of data. This adds room
// for the record header and rounds up to the next multiple of the record-header
// size, keeping every header placed at an aligned offset for atomic access.
size_t CalculateRecordSize(size_t data_amount) {
  return (data_amount + sizeof(RecordHeader) + sizeof(RecordHeader) - 1) &
         ~(sizeof(RecordHeader) - 1);
}

}  // namespace
44 | |
// Constructs a writable record allocator on top of |memory_allocator| and
// immediately reserves an initial segment large enough for |min_size| bytes
// of record data (plus header overhead), so later writes have space ready.
PersistentSystemProfile::RecordAllocator::RecordAllocator(
    base::PersistentMemoryAllocator* memory_allocator,
    size_t min_size)
    : allocator_(memory_allocator),
      alloc_reference_(0),  // No current segment until AddSegment() succeeds.
      alloc_size_(0),
      end_offset_(0) {
  AddSegment(min_size);
}
54 | |
// Constructs an accessor over |memory_allocator| without reserving any
// segment; used by the static read path (see GetSystemProfile). The
// const_cast is needed only because |allocator_| is a non-const member —
// this instance is intended for reading (note the const Read()/NextSegment()
// paths), not for mutating the underlying memory.
PersistentSystemProfile::RecordAllocator::RecordAllocator(
    const base::PersistentMemoryAllocator* memory_allocator)
    : allocator_(
          const_cast<base::PersistentMemoryAllocator*>(memory_allocator)),
      alloc_reference_(0),
      alloc_size_(0),
      end_offset_(0) {}
62 | |
63 void PersistentSystemProfile::RecordAllocator::Reset() { | |
64 // Clear the first word of all blocks so they're known to be "empty". | |
65 alloc_reference_ = 0; | |
66 while (NextSegment()) { | |
67 base::subtle::Atomic32* block = | |
68 allocator_->GetAsArray<base::subtle::Atomic32>(alloc_reference_, | |
69 kTypeIdSystemProfile, 1); | |
70 DCHECK(block); | |
71 base::subtle::NoBarrier_Store(block, 0); | |
Alexei Svitkine (slow)
2017/05/29 19:48:32
Can you use the RecordHeader type here? This way i
bcwhite
2017/05/29 20:56:35
Done.
| |
72 } | |
73 | |
74 // Reset member variables. | |
75 alloc_reference_ = 0; | |
76 alloc_size_ = 0; | |
77 end_offset_ = 0; | |
78 } | |
79 | |
80 bool PersistentSystemProfile::RecordAllocator::Write( | |
81 RecordType type, | |
82 const std::string& record) { | |
83 const char* data = record.data(); | |
84 size_t remaining_size = record.size(); | |
85 | |
86 // Allocate space and write records until everything has been stored. | |
87 do { | |
88 if (end_offset_ == alloc_size_) { | |
89 if (!AddSegment(remaining_size)) | |
90 return false; | |
91 } | |
92 // Write out as much of the data as possible. |data| and |remaining_size| | |
93 // are updated in place. | |
94 if (!WriteData(type, &data, &remaining_size)) | |
95 return false; | |
96 } while (remaining_size > 0); | |
97 | |
98 return true; | |
99 } | |
100 | |
101 bool PersistentSystemProfile::RecordAllocator::Read(RecordType* type, | |
102 std::string* record) const { | |
103 *type = kUnusedSpace; | |
104 record->clear(); | |
105 | |
106 // Access data and read records until everything has been loaded. | |
107 while (true) { | |
108 if (end_offset_ == alloc_size_) { | |
109 if (!NextSegment()) | |
110 return false; | |
111 } | |
112 if (ReadData(type, record)) | |
113 return *type != kUnusedSpace; | |
114 } | |
115 } | |
116 | |
117 bool PersistentSystemProfile::RecordAllocator::NextSegment() const { | |
118 base::PersistentMemoryAllocator::Iterator iter(allocator_, alloc_reference_); | |
119 alloc_reference_ = iter.GetNextOfType(kTypeIdSystemProfile); | |
120 alloc_size_ = allocator_->GetAllocSize(alloc_reference_); | |
121 end_offset_ = 0; | |
122 return alloc_reference_ != 0; | |
123 } | |
124 | |
125 bool PersistentSystemProfile::RecordAllocator::AddSegment(size_t min_size) { | |
126 if (NextSegment()) { | |
127 // The first record-header should have been zeroed as part of the allocation | |
128 // or by the "reset" procedure. | |
129 DCHECK_EQ(0, base::subtle::NoBarrier_Load( | |
130 allocator_->GetAsArray<base::subtle::Atomic32>( | |
131 alloc_reference_, kTypeIdSystemProfile, 1))); | |
132 return true; | |
133 } | |
134 | |
135 DCHECK_EQ(0U, alloc_reference_); | |
136 DCHECK_EQ(0U, end_offset_); | |
137 | |
138 size_t size = | |
139 std::max(CalculateRecordSize(min_size), kSystemProfileAllocSize); | |
140 | |
141 uint32_t ref = allocator_->Allocate(size, kTypeIdSystemProfile); | |
142 if (!ref) | |
143 return false; // Allocator must be full. | |
144 allocator_->MakeIterable(ref); | |
145 | |
146 alloc_reference_ = ref; | |
147 alloc_size_ = allocator_->GetAllocSize(ref); | |
148 return true; | |
149 } | |
150 | |
151 bool PersistentSystemProfile::RecordAllocator::WriteData( | |
152 RecordType type, | |
153 const char** data, | |
154 size_t* remaining_size) { | |
155 char* block = | |
156 allocator_->GetAsArray<char>(alloc_reference_, kTypeIdSystemProfile, | |
157 base::PersistentMemoryAllocator::kSizeAny); | |
158 if (!block) | |
159 return false; // It's bad if there is no accessible block. | |
160 | |
161 size_t write_size = std::min(*remaining_size, kMaxRecordSize); | |
162 write_size = | |
163 std::min(write_size, alloc_size_ - end_offset_ - sizeof(RecordHeader)); | |
164 | |
165 // Write the data and the record header. | |
166 RecordHeader header; | |
167 header.as_atomic = 0; | |
168 header.as_parts.type = type; | |
169 header.as_parts.amount = write_size; | |
170 header.as_parts.continued = (write_size < *remaining_size); | |
171 size_t offset = end_offset_; | |
172 end_offset_ += CalculateRecordSize(write_size); | |
173 DCHECK_GE(alloc_size_, end_offset_); | |
174 if (end_offset_ < alloc_size_) { | |
175 // An empty record header has to be next before this one gets written. | |
176 base::subtle::NoBarrier_Store( | |
177 reinterpret_cast<base::subtle::Atomic32*>(block + end_offset_), 0); | |
178 } | |
179 memcpy(block + offset + sizeof(header), *data, write_size); | |
180 base::subtle::Release_Store(reinterpret_cast<base::subtle::Atomic32*>(block), | |
181 header.as_atomic); | |
182 | |
183 // Account for what was stored and prepare for follow-on records with any | |
184 // remaining data. | |
185 *data += write_size; | |
186 *remaining_size -= write_size; | |
187 | |
188 return true; | |
189 } | |
190 | |
191 bool PersistentSystemProfile::RecordAllocator::ReadData( | |
192 RecordType* type, | |
193 std::string* record) const { | |
194 DCHECK_GT(alloc_size_, end_offset_); | |
195 | |
196 char* block = | |
197 allocator_->GetAsArray<char>(alloc_reference_, kTypeIdSystemProfile, | |
198 base::PersistentMemoryAllocator::kSizeAny); | |
199 if (!block) { | |
200 *type = kUnusedSpace; | |
201 return true; // No more data. | |
202 } | |
203 | |
204 // Get and validate the record header. | |
205 RecordHeader header; | |
206 header.as_atomic = base::subtle::Acquire_Load( | |
207 reinterpret_cast<base::subtle::Atomic32*>(block + end_offset_)); | |
208 bool continued = !!header.as_parts.continued; | |
209 if (header.as_parts.type == kUnusedSpace) { | |
210 *type = kUnusedSpace; | |
211 return true; // End of all records. | |
212 } else if (*type == kUnusedSpace) { | |
213 *type = static_cast<RecordType>(header.as_parts.type); | |
214 } else if (*type != header.as_parts.type) { | |
215 NOTREACHED(); // Continuation didn't match start of record. | |
216 *type = kUnusedSpace; | |
217 record->clear(); | |
218 return false; | |
219 } | |
220 size_t read_size = header.as_parts.amount; | |
221 if (read_size < sizeof(header) || | |
222 end_offset_ + sizeof(header) + read_size > alloc_size_) { | |
223 NOTREACHED(); // Invalid header amount. | |
224 *type = kUnusedSpace; | |
225 return true; // Don't try again. | |
226 } | |
227 | |
228 // Append the record data to the output string. | |
229 record->append(block + sizeof(header), read_size); | |
230 end_offset_ += CalculateRecordSize(read_size); | |
231 DCHECK_GE(alloc_size_, end_offset_); | |
232 | |
233 return !continued; | |
234 } | |
235 | |
236 PersistentSystemProfile::PersistentSystemProfile() {} | |
237 | |
238 PersistentSystemProfile::~PersistentSystemProfile() {} | |
239 | |
240 void PersistentSystemProfile::RegisterPersistentAllocator( | |
241 base::PersistentMemoryAllocator* memory_allocator) { | |
242 DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); | |
243 | |
244 // Create and store the allocator. A |min_size| of "1" ensures that a memory | |
245 // block is reserved now. | |
246 RecordAllocator allocator(memory_allocator, 1); | |
247 allocators_.push_back(std::move(allocator)); | |
248 } | |
249 | |
250 void PersistentSystemProfile::DeregisterPersistentAllocator( | |
251 base::PersistentMemoryAllocator* memory_allocator) { | |
252 DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); | |
253 | |
254 // This would be more efficient with a std::map but it's not expected that | |
255 // allocators will get deregistered with any frequency, if at all. | |
256 base::EraseIf(allocators_, [=](RecordAllocator& records) { | |
257 return records.allocator() == memory_allocator; | |
258 }); | |
259 } | |
260 | |
261 void PersistentSystemProfile::SetSystemProfile( | |
262 const SystemProfileProto& system_profile) { | |
263 DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); | |
264 | |
265 if (allocators_.empty()) | |
266 return; | |
267 | |
268 std::string serialized_profile; | |
269 if (!system_profile.SerializeToString(&serialized_profile)) { | |
270 NOTREACHED(); | |
271 return; | |
272 } | |
273 | |
274 for (auto& allocator : allocators_) { | |
275 // A full system profile always starts fresh. | |
276 allocator.Reset(); | |
277 // Write out the serialized profile. | |
278 allocator.Write(kSystemProfileProto, serialized_profile); | |
279 } | |
280 } | |
281 | |
282 // static | |
283 bool PersistentSystemProfile::GetSystemProfile( | |
284 SystemProfileProto* system_profile, | |
285 const base::PersistentMemoryAllocator* memory_allocator) { | |
286 const RecordAllocator records(memory_allocator); | |
287 | |
288 RecordType type; | |
289 std::string record; | |
290 if (!records.Read(&type, &record)) | |
291 return false; | |
292 if (type != kSystemProfileProto) | |
293 return false; | |
294 | |
295 return system_profile->ParseFromString(record); | |
296 } | |
297 | |
298 GlobalPersistentSystemProfile* GlobalPersistentSystemProfile::GetInstance() { | |
299 return base::Singleton< | |
300 GlobalPersistentSystemProfile, | |
301 base::LeakySingletonTraits<GlobalPersistentSystemProfile>>::get(); | |
302 } | |
303 | |
304 } // namespace metrics | |
OLD | NEW |