// Copyright (c) 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/metrics/persistent_memory_allocator.h"

#include <assert.h>
#include <algorithm>

#include "base/files/memory_mapped_file.h"
#include "base/logging.h"
#include "base/metrics/histogram_macros.h"

namespace {

// Required range of memory segment sizes. It has to fit in an unsigned 32-bit
// number and should be a power of 2 in order to accommodate almost any page
// size.
const uint32_t kSegmentMinSize = 1 << 10;  // 1 KiB
const uint32_t kSegmentMaxSize = 1 << 30;  // 1 GiB

// All allocations and data-structures must be aligned to this byte boundary.
// Alignment as large as the physical bus between CPU and RAM is _required_
// for some architectures, is simply more efficient on other CPUs, and
// generally a Good Idea(tm) for all platforms as it reduces/eliminates the
// chance that a type will span cache lines. Alignment mustn't be less
// than 8 to ensure proper alignment for all types. The rest is a balance
// between reducing spans across multiple cache lines and wasted space spent
// padding out allocations. An alignment of 16 would ensure that the block
// header structure always sits in a single cache line. An average of about
// 1/2 this value will be wasted with every allocation.
const uint32_t kAllocAlignment = 8;

// A constant (random) value placed in the shared metadata to identify
// an already initialized memory segment.
const uint32_t kGlobalCookie = 0x408305DC;

// The current version of the metadata. If updates are made that change
// the metadata, the version number can be queried to operate in a backward-
// compatible manner until the memory segment is completely re-initialized.
const uint32_t kGlobalVersion = 1;

// Constant values placed in the block headers to indicate their state.
const uint32_t kBlockCookieFree = 0;
const uint32_t kBlockCookieQueue = 1;
const uint32_t kBlockCookieWasted = (uint32_t)-1;
const uint32_t kBlockCookieAllocated = 0xC8799269;

// TODO(bcwhite): When acceptable, consider moving flags to std::atomic<char>
// types rather than combined bitfield.

// Flags stored in the flags field of the SharedMetadata structure below.
enum : int {
  kFlagCorrupt = 1 << 0,
  kFlagFull = 1 << 1
};

bool CheckFlag(const volatile std::atomic<uint32_t>* flags, int flag) {
  uint32_t loaded_flags = flags->load();
  return (loaded_flags & flag) != 0;
}

void SetFlag(volatile std::atomic<uint32_t>* flags, int flag) {
  uint32_t loaded_flags = flags->load();
  for (;;) {
    uint32_t new_flags = (loaded_flags & ~flag) | flag;
    // In the failure case, the actual "flags" value is stored in loaded_flags.
    if (flags->compare_exchange_weak(loaded_flags, new_flags))
      break;
  }
}
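
// Usage sketch (illustrative only, not part of the reviewed change): these
// helpers always operate on the shared metadata's flags word, e.g.
//   SetFlag(&shared_meta()->flags, kFlagFull);
//   CheckFlag(&shared_meta()->flags, kFlagCorrupt);
// as done later in this file by AllocateImpl(), SetCorrupt() and IsCorrupt().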

}  // namespace

namespace base {

// The block-header is placed at the top of every allocation within the
// segment to describe the data that follows it.
struct PersistentMemoryAllocator::BlockHeader {
  uint32_t size;       // Number of bytes in this block, including header.
  uint32_t cookie;     // Constant value indicating completed allocation.
  uint32_t type_id;    // A number provided by caller indicating data type.
  std::atomic<uint32_t> next;  // Pointer to the next block when iterating.
};

// The shared metadata exists once at the top of the memory segment to
// describe the state of the allocator to all processes.
struct PersistentMemoryAllocator::SharedMetadata {
  uint32_t cookie;     // Some value that indicates complete initialization.
  uint32_t size;       // Total size of memory segment.
  uint32_t page_size;  // Paging size within memory segment.
  uint32_t version;    // Version code so upgrades don't break.
  std::atomic<uint32_t> freeptr;  // Offset/ref to first free space in segment.
  std::atomic<uint32_t> flags;    // Bitfield of information flags.
  uint32_t id;         // Arbitrary ID number given by creator.
  uint32_t name;       // Reference to stored name string.
  uint32_t _padding_;  // Padding to match required allocation size.

  // The "iterable" queue is an M&S Queue as described here, append-only:
  // https://www.research.ibm.com/people/m/michael/podc-1996.pdf
  std::atomic<uint32_t> tailptr;  // Last block available for iteration.
  BlockHeader queue;   // Empty block for linked-list head/tail. (must be last)
};
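
// Illustrative layout sketch (offsets are examples only, assuming a 16-byte
// BlockHeader; not part of the reviewed change). The segment begins with the
// SharedMetadata at offset 0, whose embedded "queue" header doubles as the
// sentinel of the iteration list, and allocations grow toward freeptr:
//
//   0                                : SharedMetadata (cookie, size, ...)
//   offsetof(SharedMetadata, queue)  : sentinel BlockHeader (kReferenceQueue)
//   sizeof(SharedMetadata)           : first allocatable block
//   ...                              : further BlockHeader + data blocks
//   freeptr                          : start of unallocated space
//   mem_size_                        : end of segment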

// The "queue" block header is used to detect "last node" so that zero/null
// can be used to indicate that it hasn't been added at all. It is part of
// the SharedMetadata structure which itself is always located at offset zero.
const PersistentMemoryAllocator::Reference
    PersistentMemoryAllocator::kReferenceQueue =
        offsetof(SharedMetadata, queue);
const PersistentMemoryAllocator::Reference
    PersistentMemoryAllocator::kReferenceNull = 0;


// static
bool PersistentMemoryAllocator::IsMemoryAcceptable(const void* base,
                                                   size_t size,
                                                   size_t page_size,
                                                   bool readonly) {
  return ((base && reinterpret_cast<uintptr_t>(base) % kAllocAlignment == 0) &&
          (size >= sizeof(SharedMetadata) && size <= kSegmentMaxSize) &&
          (size >= kSegmentMinSize || readonly) &&
          (size % kAllocAlignment == 0 || readonly) &&
          (page_size == 0 || size % page_size == 0 || readonly));
}
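
// Example checks (illustrative values only): a writable, 8-byte-aligned
// 64 KiB segment with a 4 KiB page size satisfies every clause above, while a
// writable 512-byte segment fails the kSegmentMinSize requirement.
//
//   IsMemoryAcceptable(base, 64 << 10, 4096, /*readonly=*/false);  // true
//   IsMemoryAcceptable(base, 512, 0, /*readonly=*/false);          // false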

PersistentMemoryAllocator::PersistentMemoryAllocator(void* base,
                                                     size_t size,
                                                     size_t page_size,
                                                     uint32_t id,
                                                     const std::string& name,
                                                     bool readonly)
    : mem_base_(static_cast<char*>(base)),
      mem_size_(static_cast<uint32_t>(size)),
      mem_page_(static_cast<uint32_t>((page_size ? page_size : size))),
      readonly_(readonly),
      corrupt_(0),
      allocs_histogram_(nullptr),
      used_histogram_(nullptr) {
  static_assert(sizeof(BlockHeader) % kAllocAlignment == 0,
                "BlockHeader is not a multiple of kAllocAlignment");
  static_assert(sizeof(SharedMetadata) % kAllocAlignment == 0,
                "SharedMetadata is not a multiple of kAllocAlignment");

  // Ensure that memory segment is of acceptable size.
  CHECK(IsMemoryAcceptable(base, size, page_size, readonly));

  // These atomics operate inter-process and so must be lock-free. The local
  // casts are to make sure it can be evaluated at compile time to a constant.
  CHECK(((SharedMetadata*)0)->freeptr.is_lock_free());
Alexander Potapenko
2016/01/13 18:34:00
Sorry, are you dereferencing a NULL pointer here?
bcwhite
2016/01/13 21:31:39
No. It's a static method of the type that doesn't
  CHECK(((SharedMetadata*)0)->flags.is_lock_free());
  CHECK(((BlockHeader*)0)->next.is_lock_free());
  CHECK(corrupt_.is_lock_free());

  if (shared_meta()->cookie != kGlobalCookie) {
    if (readonly) {
      NOTREACHED();
      SetCorrupt();
      return;
    }

    // This block is only executed when a completely new memory segment is
    // being initialized. It's unshared and single-threaded...
    volatile BlockHeader* const first_block =
        reinterpret_cast<volatile BlockHeader*>(mem_base_ +
                                                sizeof(SharedMetadata));
    if (shared_meta()->cookie != 0 ||
        shared_meta()->size != 0 ||
        shared_meta()->version != 0 ||
        shared_meta()->freeptr.load() != 0 ||
        shared_meta()->flags.load() != 0 ||
        shared_meta()->id != 0 ||
        shared_meta()->name != 0 ||
        shared_meta()->tailptr != 0 ||
        shared_meta()->queue.cookie != 0 ||
        shared_meta()->queue.next.load() != 0 ||
        first_block->size != 0 ||
        first_block->cookie != 0 ||
        first_block->type_id != 0 ||
        first_block->next != 0) {
      // ...or something malicious has been playing with the metadata.
      NOTREACHED();
      SetCorrupt();
    }

    // This is still safe to do even if corruption has been detected.
    shared_meta()->cookie = kGlobalCookie;
    shared_meta()->size = mem_size_;
    shared_meta()->page_size = mem_page_;
    shared_meta()->version = kGlobalVersion;
    shared_meta()->id = id;
    shared_meta()->freeptr.store(sizeof(SharedMetadata));

    // Set up the queue of iterable allocations.
    shared_meta()->queue.size = sizeof(BlockHeader);
    shared_meta()->queue.cookie = kBlockCookieQueue;
    shared_meta()->queue.next.store(kReferenceQueue);
    shared_meta()->tailptr.store(kReferenceQueue);

    // Allocate space for the name so other processes can learn it.
    if (!name.empty()) {
      const size_t name_length = name.length() + 1;
      shared_meta()->name = Allocate(name_length, 0);
      char* name_cstr = GetAsObject<char>(shared_meta()->name, 0);
      if (name_cstr)
        strcpy(name_cstr, name.c_str());
    }
  } else {
    if (readonly) {
      // For read-only access, validate reasonable ctor parameters.
      DCHECK(mem_size_ >= shared_meta()->freeptr.load());
    } else {
      // The allocator is attaching to a previously initialized segment of
      // memory. Make sure the embedded data matches what has been passed.
      if (shared_meta()->size != mem_size_ ||
          shared_meta()->page_size != mem_page_) {
        NOTREACHED();
        SetCorrupt();
      }
    }
  }
}

PersistentMemoryAllocator::~PersistentMemoryAllocator() {
  // It's strictly forbidden to do any memory access here in case there is
  // some issue with the underlying memory segment. The "Local" allocator
  // makes use of this to allow deletion of the segment on the heap from
  // within its destructor.
}

uint32_t PersistentMemoryAllocator::Id() const {
  return shared_meta()->id;
}

const char* PersistentMemoryAllocator::Name() const {
  Reference name_ref = shared_meta()->name;
  const char* name_cstr = GetAsObject<char>(name_ref, 0);
  if (!name_cstr)
    return "";

  size_t name_length = GetAllocSize(name_ref);
  if (name_cstr[name_length - 1] != '\0') {
    NOTREACHED();
    SetCorrupt();
    return "";
  }

  return name_cstr;
}

void PersistentMemoryAllocator::CreateHistograms(const std::string& name) {
  if (name.empty() || readonly_)
    return;

  DCHECK(!used_histogram_);
  used_histogram_ = Histogram::FactoryGet(
      name + ".UsedKiB", 1, 256 << 10, 100, HistogramBase::kNoFlags);

  DCHECK(!allocs_histogram_);
  allocs_histogram_ = Histogram::FactoryGet(
      name + ".Allocs", 1, 10000, 50, HistogramBase::kNoFlags);
}

size_t PersistentMemoryAllocator::used() const {
  return std::min(shared_meta()->freeptr.load(), mem_size_);
}

size_t PersistentMemoryAllocator::GetAllocSize(Reference ref) const {
  const volatile BlockHeader* const block = GetBlock(ref, 0, 0, false, false);
  if (!block)
    return 0;
  uint32_t size = block->size;
  // Header was verified by GetBlock() but a malicious actor could change
  // the value between there and here. Check it again.
  if (size <= sizeof(BlockHeader) || ref + size >= mem_size_)
    return 0;
  return size - sizeof(BlockHeader);
}

uint32_t PersistentMemoryAllocator::GetType(Reference ref) const {
  const volatile BlockHeader* const block = GetBlock(ref, 0, 0, false, false);
  if (!block)
    return 0;
  return block->type_id;
}

void PersistentMemoryAllocator::SetType(Reference ref, uint32_t type_id) {
  DCHECK(!readonly_);
  volatile BlockHeader* const block = GetBlock(ref, 0, 0, false, false);
  if (!block)
    return;
  block->type_id = type_id;
}

PersistentMemoryAllocator::Reference PersistentMemoryAllocator::Allocate(
    size_t req_size,
    uint32_t type_id) {
  Reference ref = AllocateImpl(req_size, type_id);
  if (ref) {
    // Success: Record this allocation in usage stats (if active).
    if (allocs_histogram_)
      allocs_histogram_->Add(static_cast<HistogramBase::Sample>(req_size));
  } else {
    // Failure: Record an allocation of zero for tracking.
    if (allocs_histogram_)
      allocs_histogram_->Add(0);
  }
  return ref;
}

PersistentMemoryAllocator::Reference PersistentMemoryAllocator::AllocateImpl(
    size_t req_size,
    uint32_t type_id) {
Alexander Potapenko
2016/01/14 10:54:16
OOC: did clang-format put the arguments on the dif
  DCHECK(!readonly_);

  // Validate req_size to ensure it won't overflow when used as 32-bit value.
  if (req_size > kSegmentMaxSize - sizeof(BlockHeader)) {
    NOTREACHED();
    return kReferenceNull;
  }

  // Round up the requested size, plus header, to the next allocation
  // alignment.
  uint32_t size = static_cast<uint32_t>(req_size + sizeof(BlockHeader));
  size = (size + (kAllocAlignment - 1)) & ~(kAllocAlignment - 1);
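  // Worked example for the rounding above (illustrative values; assumes
  // sizeof(BlockHeader) == 16): a 13-byte request becomes 13 + 16 = 29 bytes
  // including the header, and (29 + 7) & ~7 rounds that up to 32, the next
  // multiple of kAllocAlignment.
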
  if (size <= sizeof(BlockHeader) || size > mem_page_) {
    NOTREACHED();
    return kReferenceNull;
  }

  // Get the current start of unallocated memory. Other threads may
  // update this at any time and cause us to retry these operations.
  // This value should be treated as "const" to avoid confusion through
  // the code below but recognize that any failed compare-exchange operation
  // involving it will cause it to be loaded with a more recent value. The
  // code should either exit or restart the loop in that case.
  /* const */ uint32_t freeptr = shared_meta()->freeptr.load();

  // Allocation is lockless so we do all our calculation and then, if saving
  // indicates a change has occurred since we started, scrap everything and
  // start over.
  for (;;) {
    if (IsCorrupt())
      return kReferenceNull;

    if (freeptr + size > mem_size_) {
      SetFlag(&shared_meta()->flags, kFlagFull);
      return kReferenceNull;
    }

    // Get pointer to the "free" block. If something has been allocated since
    // the load of freeptr above, it is still safe as nothing will be written
    // to that location until after the compare-exchange below.
    volatile BlockHeader* const block = GetBlock(freeptr, 0, 0, false, true);
    if (!block) {
      SetCorrupt();
      return kReferenceNull;
    }

    // An allocation cannot cross page boundaries. If it would, create a
    // "wasted" block and begin again at the top of the next page. This
    // area could just be left empty but we fill in the block header just
    // for completeness' sake.
    const uint32_t page_free = mem_page_ - freeptr % mem_page_;
    if (size > page_free) {
      if (page_free <= sizeof(BlockHeader)) {
        SetCorrupt();
        return kReferenceNull;
      }
      const uint32_t new_freeptr = freeptr + page_free;
      if (shared_meta()->freeptr.compare_exchange_strong(freeptr,
                                                         new_freeptr)) {
        block->size = page_free;
        block->cookie = kBlockCookieWasted;
      }
      continue;
    }

    // Don't leave a slice at the end of a page too small for anything. This
    // can result in an allocation up to two alignment-sizes greater than the
    // minimum required by requested-size + header + alignment.
    if (page_free - size < sizeof(BlockHeader) + kAllocAlignment)
      size = page_free;

    const uint32_t new_freeptr = freeptr + size;
    if (new_freeptr > mem_size_) {
      SetCorrupt();
      return kReferenceNull;
    }

    // Save our work. Try again if another thread has completed an allocation
    // while we were processing. A "weak" exchange would be permissible here
    // because the code will just loop and try again but the above processing
    // is significant so make the extra effort of a "strong" exchange.
    if (!shared_meta()->freeptr.compare_exchange_strong(freeptr, new_freeptr))
      continue;

    // Given that all memory was zeroed before ever being given to an instance
    // of this class and given that we only allocate in a monotonic fashion
    // going forward, it must be that the newly allocated block is completely
    // full of zeros. If we find anything in the block header that is NOT a
    // zero then something must have previously run amok through memory,
    // writing beyond the allocated space and into unallocated space.
    if (block->size != 0 ||
        block->cookie != kBlockCookieFree ||
        block->type_id != 0 ||
        block->next.load() != 0) {
      SetCorrupt();
      return kReferenceNull;
    }

    block->size = size;
    block->cookie = kBlockCookieAllocated;
    block->type_id = type_id;
    return freeptr;
  }
}

void PersistentMemoryAllocator::GetMemoryInfo(MemoryInfo* meminfo) const {
  uint32_t remaining = std::max(mem_size_ - shared_meta()->freeptr.load(),
                                (uint32_t)sizeof(BlockHeader));
  meminfo->total = mem_size_;
  meminfo->free = IsCorrupt() ? 0 : remaining - sizeof(BlockHeader);
}

void PersistentMemoryAllocator::MakeIterable(Reference ref) {
  DCHECK(!readonly_);
  if (IsCorrupt())
    return;
  volatile BlockHeader* block = GetBlock(ref, 0, 0, false, false);
  if (!block)  // invalid reference
    return;
  if (block->next.load(std::memory_order_acquire) != 0)  // Already iterable.
    return;
  block->next.store(kReferenceQueue, std::memory_order_release);  // New tail.

  // Try to add this block to the tail of the queue. May take multiple tries.
  // If so, tail will be automatically updated with a more recent value during
  // compare-exchange operations.
  uint32_t tail = shared_meta()->tailptr.load(std::memory_order_acquire);
  for (;;) {
    // Acquire the current tail-pointer released by previous call to this
    // method and validate it.
    block = GetBlock(tail, 0, 0, true, false);
    if (!block) {
      SetCorrupt();
      return;
    }

    // Try to insert the block at the tail of the queue. The tail node always
    // has an existing value of kReferenceQueue; if that is somehow not the
    // existing value then another thread has acted in the meantime. A "strong"
    // exchange is necessary so the "else" block does not get executed when
    // that is not actually the case (which can happen with a "weak" exchange).
    uint32_t next = kReferenceQueue;  // Will get replaced with existing value.
    if (block->next.compare_exchange_strong(next, ref,
                                            std::memory_order_acq_rel,
                                            std::memory_order_acquire)) {
      // Update the tail pointer to the new offset. If the "else" clause did
      // not exist, then this could be a simple Release_Store to set the new
      // value but because it does, it's possible that other threads could add
      // one or more nodes at the tail before reaching this point. We don't
      // have to check the return value because it either operates correctly
      // or the exact same operation has already been done (by the "else"
      // clause) on some other thread.
      shared_meta()->tailptr.compare_exchange_strong(tail, ref,
                                                     std::memory_order_release,
                                                     std::memory_order_relaxed);
      return;
    } else {
      // In the unlikely case that a thread crashed or was killed between the
      // update of "next" and the update of "tailptr", it is necessary to
      // perform the operation that would have been done. There's no explicit
      // check for crash/kill which means that this operation may also happen
      // even when the other thread is in perfect working order which is what
      // necessitates the CompareAndSwap above.
      shared_meta()->tailptr.compare_exchange_strong(tail, next,
                                                     std::memory_order_acq_rel,
                                                     std::memory_order_acquire);
    }
  }
}

void PersistentMemoryAllocator::CreateIterator(Iterator* state,
                                               Reference starting_after) const {
  if (starting_after) {
    // Ensure that the starting point is a valid, iterable block.
    const volatile BlockHeader* block =
        GetBlock(starting_after, 0, 0, false, false);
    if (!block || !block->next.load()) {
      NOTREACHED();
      starting_after = kReferenceQueue;
    }
  } else {
    // A zero beginning is really the Queue reference.
    starting_after = kReferenceQueue;
  }

  state->last = starting_after;
  state->niter = 0;
}

PersistentMemoryAllocator::Reference PersistentMemoryAllocator::GetNextIterable(
    Iterator* state,
    uint32_t* type_id) const {
  const volatile BlockHeader* block = GetBlock(state->last, 0, 0, true, false);
  if (!block)  // invalid iterator state
    return kReferenceNull;

  // The compiler and CPU can freely reorder all memory accesses on which
  // there are no dependencies. It could, for example, move the load of
  // "freeptr" above this point because there are no explicit dependencies
  // between it and "next". If it did, however, then another block could
  // be queued after that but before the following load meaning there is
  // one more queued block than the future "detect loop by having more
  // blocks that could fit before freeptr" will allow.
  //
  // By "acquiring" the "next" value here, it's synchronized to the enqueue
  // of the node which in turn is synchronized to the allocation (which sets
  // freeptr). Thus, the scenario above cannot happen.
  uint32_t next = block->next.load(std::memory_order_acquire);
  block = GetBlock(next, 0, 0, false, false);
  if (!block)  // no next allocation in queue
    return kReferenceNull;

  // Memory corruption could cause a loop in the list. We need to detect
  // that so as to not cause an infinite loop in the caller. We do this
  // simply by making sure we don't iterate more than the absolute maximum
  // number of allocations that could have been made. Callers are likely
  // to loop multiple times before it is detected but at least it stops.
  uint32_t freeptr = std::min(
      shared_meta()->freeptr.load(std::memory_order_acquire),
      mem_size_);
  if (state->niter > freeptr / (sizeof(BlockHeader) + kAllocAlignment)) {
    SetCorrupt();
    return kReferenceNull;
  }

  state->last = next;
  state->niter++;
  *type_id = block->type_id;

  return next;
}
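
// Minimal iteration sketch (illustrative only; "allocator" and kMyTypeId are
// hypothetical names, not part of this change). It uses only CreateIterator()
// and GetNextIterable() as defined above:
//
//   PersistentMemoryAllocator::Iterator iter;
//   allocator.CreateIterator(&iter, PersistentMemoryAllocator::kReferenceNull);
//   uint32_t type_id;
//   for (PersistentMemoryAllocator::Reference ref =
//            allocator.GetNextIterable(&iter, &type_id);
//        ref; ref = allocator.GetNextIterable(&iter, &type_id)) {
//     if (type_id == kMyTypeId) {
//       // Process the allocation referenced by |ref|.
//     }
//   }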

// The "corrupted" state is held both locally and globally (shared). The
// shared flag can't be trusted since a malicious actor could overwrite it.
// Because corruption can be detected during read-only operations such as
// iteration, this method may be called by other "const" methods. In this
// case, it's safe to discard the constness and modify the local flag and
// maybe even the shared flag if the underlying data isn't actually read-only.
void PersistentMemoryAllocator::SetCorrupt() const {
  LOG(ERROR) << "Corruption detected in shared-memory segment.";
  const_cast<std::atomic<bool>*>(&corrupt_)->store(true);
  if (!readonly_)
    SetFlag(const_cast<volatile std::atomic<uint32_t>*>(&shared_meta()->flags),
            kFlagCorrupt);
}

bool PersistentMemoryAllocator::IsCorrupt() const {
  if (corrupt_.load() || CheckFlag(&shared_meta()->flags, kFlagCorrupt)) {
    SetCorrupt();  // Make sure all indicators are set.
    return true;
  }
  return false;
}

bool PersistentMemoryAllocator::IsFull() const {
  return CheckFlag(&shared_meta()->flags, kFlagFull);
}

// Dereference a block |ref| and ensure that it's valid for the desired
// |type_id| and |size|. |queue_ok| and |free_ok| indicate that we may try to
// access block headers not available to callers but still accessed by this
// module. By having internal dereferences go through this same function, the
// allocator is hardened against corruption.
const volatile PersistentMemoryAllocator::BlockHeader*
PersistentMemoryAllocator::GetBlock(Reference ref, uint32_t type_id,
                                    uint32_t size, bool queue_ok,
                                    bool free_ok) const {
  // Validation of parameters.
  if (ref % kAllocAlignment != 0)
    return nullptr;
  if (ref < (queue_ok ? kReferenceQueue : sizeof(SharedMetadata)))
    return nullptr;
  size += sizeof(BlockHeader);
  if (ref + size > mem_size_)
    return nullptr;

  // Validation of referenced block-header.
  if (!free_ok) {
    uint32_t freeptr = shared_meta()->freeptr.load();
    if (ref + size > freeptr)
      return nullptr;
    const volatile BlockHeader* const block =
        reinterpret_cast<volatile BlockHeader*>(mem_base_ + ref);
    if (block->size < size)
      return nullptr;
    if (ref != kReferenceQueue && block->cookie != kBlockCookieAllocated)
      return nullptr;
    if (type_id != 0 && block->type_id != type_id)
      return nullptr;
  }

  // Return pointer to block data.
  return reinterpret_cast<const volatile BlockHeader*>(mem_base_ + ref);
}

const volatile void* PersistentMemoryAllocator::GetBlockData(
    Reference ref,
    uint32_t type_id,
    uint32_t size) const {
  DCHECK(size > 0);
  const volatile BlockHeader* block =
      GetBlock(ref, type_id, size, false, false);
  if (!block)
    return nullptr;
  return reinterpret_cast<const volatile char*>(block) + sizeof(BlockHeader);
}

void PersistentMemoryAllocator::UpdateStaticHistograms() {
  DCHECK(!readonly_);
  if (used_histogram_) {
    MemoryInfo meminfo;
    GetMemoryInfo(&meminfo);
    HistogramBase::Sample usedkb = static_cast<HistogramBase::Sample>(
        (meminfo.total - meminfo.free) >> 10);
    used_histogram_->Add(usedkb);
  }
}


//----- LocalPersistentMemoryAllocator -----------------------------------------

LocalPersistentMemoryAllocator::LocalPersistentMemoryAllocator(
    size_t size,
    uint32_t id,
    const std::string& name)
    : PersistentMemoryAllocator(memset(new char[size], 0, size),
                                size, 0, id, name, false) {}

LocalPersistentMemoryAllocator::~LocalPersistentMemoryAllocator() {
  delete [] mem_base_;
}
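
// Usage sketch (illustrative only; the size, id, name and type-id values are
// hypothetical examples, not part of this change):
//
//   LocalPersistentMemoryAllocator allocator(64 << 10, 0x1234ABCD, "Example");
//   PersistentMemoryAllocator::Reference ref =
//       allocator.Allocate(24, /*type_id=*/42);
//   if (ref)
//     allocator.MakeIterable(ref);  // Expose it to iterators/other readers.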


//----- FilePersistentMemoryAllocator ------------------------------------------

FilePersistentMemoryAllocator::FilePersistentMemoryAllocator(
    MemoryMappedFile* file,
    uint32_t id,
    const std::string& name)
    : PersistentMemoryAllocator(const_cast<uint8_t*>(file->data()),
                                file->length(), 0, id, name, true),
      mapped_file_(file) {}

FilePersistentMemoryAllocator::~FilePersistentMemoryAllocator() {
}

// static
bool FilePersistentMemoryAllocator::IsFileAcceptable(
    const MemoryMappedFile& file) {
  return IsMemoryAcceptable(file.data(), file.length(), 0, true);
}

}  // namespace base