Chromium Code Reviews

Side by Side Diff: base/metrics/persistent_memory_allocator.cc

Issue 1803253002: Improved iterator for persistent memory allocator. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@refactor-hp
Patch Set: rebased Created 4 years, 8 months ago
1 // Copyright (c) 2015 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2015 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "base/metrics/persistent_memory_allocator.h" 5 #include "base/metrics/persistent_memory_allocator.h"
6 6
7 #include <assert.h> 7 #include <assert.h>
8 #include <algorithm> 8 #include <algorithm>
9 9
10 #include "base/files/memory_mapped_file.h" 10 #include "base/files/memory_mapped_file.h"
(...skipping 27 matching lines...)
38 // TODO(bcwhite): When acceptable, consider moving flags to std::atomic<char> 38 // TODO(bcwhite): When acceptable, consider moving flags to std::atomic<char>
39 // types rather than combined bitfield. 39 // types rather than combined bitfield.
40 40
41 // Flags stored in the flags_ field of the SharedMetaData structure below. 41 // Flags stored in the flags_ field of the SharedMetaData structure below.
42 enum : int { 42 enum : int {
43 kFlagCorrupt = 1 << 0, 43 kFlagCorrupt = 1 << 0,
44 kFlagFull = 1 << 1 44 kFlagFull = 1 << 1
45 }; 45 };
46 46
47 bool CheckFlag(const volatile std::atomic<uint32_t>* flags, int flag) { 47 bool CheckFlag(const volatile std::atomic<uint32_t>* flags, int flag) {
48 uint32_t loaded_flags = flags->load(); 48 uint32_t loaded_flags = flags->load(std::memory_order_relaxed);
49 return (loaded_flags & flag) != 0; 49 return (loaded_flags & flag) != 0;
50 } 50 }
51 51
52 void SetFlag(volatile std::atomic<uint32_t>* flags, int flag) { 52 void SetFlag(volatile std::atomic<uint32_t>* flags, int flag) {
53 uint32_t loaded_flags = flags->load(); 53 uint32_t loaded_flags = flags->load(std::memory_order_relaxed);
54 for (;;) { 54 for (;;) {
55 uint32_t new_flags = (loaded_flags & ~flag) | flag; 55 uint32_t new_flags = (loaded_flags & ~flag) | flag;
56 // In the failure case, the actual "flags" value is stored in loaded_flags. 56 // In the failure case, the actual "flags" value is stored in loaded_flags.
57 if (flags->compare_exchange_weak(loaded_flags, new_flags)) 57 if (flags->compare_exchange_weak(loaded_flags, new_flags))
58 break; 58 break;
59 } 59 }
60 } 60 }
61 61
62 } // namespace 62 } // namespace
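
Note on the SetFlag() loop above: compare_exchange_weak() reloads |loaded_flags| with the current value whenever it fails, so each retry recomputes the desired bits from fresh data and the loop ends once no other thread races the update. A minimal standalone sketch of the same idiom (illustrative only, not part of this CL; the SetBits name is hypothetical):

  #include <atomic>
  #include <cstdint>

  void SetBits(std::atomic<uint32_t>* flags, uint32_t bits) {
    uint32_t expected = flags->load(std::memory_order_relaxed);
    uint32_t desired;
    do {
      desired = expected | bits;
      // On failure, |expected| is refreshed with the current value, so the
      // next iteration recomputes |desired| from up-to-date data.
    } while (!flags->compare_exchange_weak(expected, desired));
  }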
63 63
(...skipping 48 matching lines...)
112 112
113 // The "queue" block header is used to detect "last node" so that zero/null 113 // The "queue" block header is used to detect "last node" so that zero/null
114 // can be used to indicate that it hasn't been added at all. It is part of 114 // can be used to indicate that it hasn't been added at all. It is part of
115 // the SharedMetadata structure which itself is always located at offset zero. 115 // the SharedMetadata structure which itself is always located at offset zero.
116 const PersistentMemoryAllocator::Reference 116 const PersistentMemoryAllocator::Reference
117 PersistentMemoryAllocator::kReferenceQueue = 117 PersistentMemoryAllocator::kReferenceQueue =
118 offsetof(SharedMetadata, queue); 118 offsetof(SharedMetadata, queue);
119 const PersistentMemoryAllocator::Reference 119 const PersistentMemoryAllocator::Reference
120 PersistentMemoryAllocator::kReferenceNull = 0; 120 PersistentMemoryAllocator::kReferenceNull = 0;
121 121
122 PersistentMemoryAllocator::Iterator::Iterator(
123 const PersistentMemoryAllocator* allocator)
124 : allocator_(allocator), last_record_(kReferenceQueue), record_count_(0) {}
125
126 PersistentMemoryAllocator::Iterator::Iterator(
127 const PersistentMemoryAllocator* allocator,
128 Reference starting_after)
129 : allocator_(allocator), last_record_(starting_after), record_count_(0) {
130 // Ensure that the starting point is a valid, iterable block (meaning it can
131 // be read and has a non-zero "next" pointer).
132 const volatile BlockHeader* block =
133 allocator_->GetBlock(starting_after, 0, 0, false, false);
134 if (!block || block->next.load(std::memory_order_relaxed) == 0) {
135 NOTREACHED();
136 last_record_.store(kReferenceQueue, std::memory_order_release);
137 }
138 }
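
The two-argument constructor above lets a caller resume iteration from a record it has already seen, provided that record was made iterable. A hedged usage sketch (|allocator| and |saved_ref| are assumed to exist in the caller):

  PersistentMemoryAllocator::Iterator iter(allocator, saved_ref);
  uint32_t type_id = 0;
  PersistentMemoryAllocator::Reference next = iter.GetNext(&type_id);
  // |next| is the first iterable record enqueued after |saved_ref|, or
  // kReferenceNull if there is none (or the iterator state was invalid).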
139
140 PersistentMemoryAllocator::Reference
141 PersistentMemoryAllocator::Iterator::GetNext(uint32_t* type_return) {
142 // Make a copy of the existing count of found-records, acquiring all changes
143 // made to the allocator, notably "freeptr" (see comment in loop for why
144 // the load of that value cannot be moved above here) that occurred during
145 // any previous runs of this method, including those by parallel threads
146 // that interrupted it. It pairs with the Release at the end of this method.
147 //
148 // Otherwise, if the compiler were to arrange the two loads such that
149 // "count" was fetched _after_ "freeptr" then it would be possible for
150 // this thread to be interrupted between them while other threads perform
151 // multiple allocations, make-iterables, and iterations (with the included
152 // increment of |record_count_|) culminating in the check at the bottom
153 // mistakenly determining that a loop exists. Isn't this stuff fun?
154 uint32_t count = record_count_.load(std::memory_order_acquire);
155
156 Reference last = last_record_.load(std::memory_order_acquire);
157 Reference next;
158 while (true) {
159 const volatile BlockHeader* block =
160 allocator_->GetBlock(last, 0, 0, true, false);
161 if (!block) // Invalid iterator state.
162 return kReferenceNull;
163
164 // The compiler and CPU can freely reorder all memory accesses on which
165 // there are no dependencies. It could, for example, move the load of
166 // "freeptr" to above this point because there are no explicit dependencies
167 // between it and "next". If it did, however, then another block could
168 // be queued after that but before the following load meaning there is
169 // one more queued block than the future "detect loop by having more
170 // blocks that could fit before freeptr" will allow.
171 //
172 // By "acquiring" the "next" value here, it's synchronized to the enqueue
173 // of the node which in turn is synchronized to the allocation (which sets
174 // freeptr). Thus, the scenario above cannot happen.
175 next = block->next.load(std::memory_order_acquire);
176 if (next == kReferenceQueue) // No next allocation in queue.
177 return kReferenceNull;
178 block = allocator_->GetBlock(next, 0, 0, false, false);
179 if (!block) { // Memory is corrupt.
180 allocator_->SetCorrupt();
181 return kReferenceNull;
182 }
183
184 // Update the "last_record" pointer to be the reference being returned.
185 // If it fails then another thread has already iterated past it so loop
186 // again. Failing will also load the existing value into "last" so there
187 // is no need to do another such load when the while-loop restarts. A
188 // "strong" compare-exchange is used because failing unnecessarily would
189 // mean repeating some fairly costly validations above.
190 if (last_record_.compare_exchange_strong(last, next)) {
191 *type_return = block->type_id;
192 break;
193 }
194 }
195
196 // Memory corruption could cause a loop in the list. Such must be detected
197 // so as to not cause an infinite loop in the caller. This is done by simply
198 // making sure it doesn't iterate more times than the absolute maximum
199 // number of allocations that could have been made. Callers are likely
200 // to loop multiple times before it is detected but at least it stops.
201 const uint32_t freeptr = std::min(
202 allocator_->shared_meta()->freeptr.load(std::memory_order_relaxed),
203 allocator_->mem_size_);
204 const uint32_t max_records =
205 freeptr / (sizeof(BlockHeader) + kAllocAlignment);
206 if (count > max_records) {
207 allocator_->SetCorrupt();
208 return kReferenceNull;
209 }
210
211 // Increment the count and release the changes made above. It pairs with
212 // the Acquire at the top of this method. Note that this operation is not
213 // strictly synchronized with fetching of the object to return, which would
214 // have to be done inside the loop and is somewhat complicated to achieve.
215 // It does not matter if it falls behind temporarily so long as it never
216 // gets ahead.
217 record_count_.fetch_add(1, std::memory_order_release);
218 return next;
219 }
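
The acquire load of |record_count_| at the top of GetNext() and the release fetch_add() at the bottom form a standard release/acquire pair: whatever the releasing thread wrote before the increment becomes visible to a thread whose acquire load observes that increment. A generic standalone sketch of that guarantee (illustrative only, not this class's members):

  #include <atomic>
  #include <cstdint>

  std::atomic<uint32_t> counter{0};
  uint32_t payload = 0;  // Ordinary data published by the release below.

  void Producer() {
    payload = 42;                                     // Written before...
    counter.fetch_add(1, std::memory_order_release);  // ...this release.
  }

  void Consumer() {
    if (counter.load(std::memory_order_acquire) != 0) {
      // The acquire pairs with the release, so payload == 42 is guaranteed
      // to be visible here.
    }
  }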
220
221 PersistentMemoryAllocator::Reference
222 PersistentMemoryAllocator::Iterator::GetNextOfType(uint32_t type_match) {
223 Reference ref;
224 uint32_t type_found;
225 while ((ref = GetNext(&type_found)) != 0) {
226 if (type_found == type_match)
227 return ref;
228 }
229 return kReferenceNull;
230 }
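
GetNextOfType() is a thin filter over GetNext(). A hedged usage sketch (|allocator| and kMyType are assumptions made for illustration):

  constexpr uint32_t kMyType = 0x1234;
  PersistentMemoryAllocator::Iterator iter(allocator);
  PersistentMemoryAllocator::Reference ref;
  while ((ref = iter.GetNextOfType(kMyType)) !=
         PersistentMemoryAllocator::kReferenceNull) {
    // |ref| names an iterable record of kMyType; resolve it with
    // GetAsObject<> if the contents are needed.
  }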
122 231
123 // static 232 // static
124 bool PersistentMemoryAllocator::IsMemoryAcceptable(const void* base, 233 bool PersistentMemoryAllocator::IsMemoryAcceptable(const void* base,
125 size_t size, 234 size_t size,
126 size_t page_size, 235 size_t page_size,
127 bool readonly) { 236 bool readonly) {
128 return ((base && reinterpret_cast<uintptr_t>(base) % kAllocAlignment == 0) && 237 return ((base && reinterpret_cast<uintptr_t>(base) % kAllocAlignment == 0) &&
129 (size >= sizeof(SharedMetadata) && size <= kSegmentMaxSize) && 238 (size >= sizeof(SharedMetadata) && size <= kSegmentMaxSize) &&
130 (size >= kSegmentMinSize || readonly) && 239 (size >= kSegmentMinSize || readonly) &&
131 (size % kAllocAlignment == 0 || readonly) && 240 (size % kAllocAlignment == 0 || readonly) &&
(...skipping 38 matching lines...)
170 } 279 }
171 280
172 // This block is only executed when a completely new memory segment is 281 // This block is only executed when a completely new memory segment is
173 // being initialized. It's unshared and single-threaded... 282 // being initialized. It's unshared and single-threaded...
174 volatile BlockHeader* const first_block = 283 volatile BlockHeader* const first_block =
175 reinterpret_cast<volatile BlockHeader*>(mem_base_ + 284 reinterpret_cast<volatile BlockHeader*>(mem_base_ +
176 sizeof(SharedMetadata)); 285 sizeof(SharedMetadata));
177 if (shared_meta()->cookie != 0 || 286 if (shared_meta()->cookie != 0 ||
178 shared_meta()->size != 0 || 287 shared_meta()->size != 0 ||
179 shared_meta()->version != 0 || 288 shared_meta()->version != 0 ||
180 shared_meta()->freeptr.load() != 0 || 289 shared_meta()->freeptr.load(std::memory_order_relaxed) != 0 ||
181 shared_meta()->flags.load() != 0 || 290 shared_meta()->flags.load(std::memory_order_relaxed) != 0 ||
182 shared_meta()->id != 0 || 291 shared_meta()->id != 0 ||
183 shared_meta()->name != 0 || 292 shared_meta()->name != 0 ||
184 shared_meta()->tailptr != 0 || 293 shared_meta()->tailptr != 0 ||
185 shared_meta()->queue.cookie != 0 || 294 shared_meta()->queue.cookie != 0 ||
186 shared_meta()->queue.next.load() != 0 || 295 shared_meta()->queue.next.load(std::memory_order_relaxed) != 0 ||
187 first_block->size != 0 || 296 first_block->size != 0 ||
188 first_block->cookie != 0 || 297 first_block->cookie != 0 ||
189 first_block->type_id != 0 || 298 first_block->type_id != 0 ||
190 first_block->next != 0) { 299 first_block->next != 0) {
191 // ...or something malicious has been playing with the metadata. 300 // ...or something malicious has been playing with the metadata.
192 NOTREACHED(); 301 NOTREACHED();
193 SetCorrupt(); 302 SetCorrupt();
194 } 303 }
195 304
196 // This is still safe to do even if corruption has been detected. 305 // This is still safe to do even if corruption has been detected.
197 shared_meta()->cookie = kGlobalCookie; 306 shared_meta()->cookie = kGlobalCookie;
198 shared_meta()->size = mem_size_; 307 shared_meta()->size = mem_size_;
199 shared_meta()->page_size = mem_page_; 308 shared_meta()->page_size = mem_page_;
200 shared_meta()->version = kGlobalVersion; 309 shared_meta()->version = kGlobalVersion;
201 shared_meta()->id = id; 310 shared_meta()->id = id;
202 shared_meta()->freeptr.store(sizeof(SharedMetadata)); 311 shared_meta()->freeptr.store(sizeof(SharedMetadata),
312 std::memory_order_release);
203 313
204 // Set up the queue of iterable allocations. 314 // Set up the queue of iterable allocations.
205 shared_meta()->queue.size = sizeof(BlockHeader); 315 shared_meta()->queue.size = sizeof(BlockHeader);
206 shared_meta()->queue.cookie = kBlockCookieQueue; 316 shared_meta()->queue.cookie = kBlockCookieQueue;
207 shared_meta()->queue.next.store(kReferenceQueue); 317 shared_meta()->queue.next.store(kReferenceQueue, std::memory_order_release);
208 shared_meta()->tailptr.store(kReferenceQueue); 318 shared_meta()->tailptr.store(kReferenceQueue, std::memory_order_release);
209 319
210 // Allocate space for the name so other processes can learn it. 320 // Allocate space for the name so other processes can learn it.
211 if (!name.empty()) { 321 if (!name.empty()) {
212 const size_t name_length = name.length() + 1; 322 const size_t name_length = name.length() + 1;
213 shared_meta()->name = Allocate(name_length, 0); 323 shared_meta()->name = Allocate(name_length, 0);
214 char* name_cstr = GetAsObject<char>(shared_meta()->name, 0); 324 char* name_cstr = GetAsObject<char>(shared_meta()->name, 0);
215 if (name_cstr) 325 if (name_cstr)
216 memcpy(name_cstr, name.data(), name.length()); 326 memcpy(name_cstr, name.data(), name.length());
217 } 327 }
218 } else { 328 } else {
219 if (shared_meta()->size == 0 || 329 if (shared_meta()->size == 0 ||
220 shared_meta()->version == 0 || 330 shared_meta()->version == 0 ||
221 shared_meta()->freeptr.load() == 0 || 331 shared_meta()->freeptr.load(std::memory_order_relaxed) == 0 ||
222 shared_meta()->tailptr == 0 || 332 shared_meta()->tailptr == 0 ||
223 shared_meta()->queue.cookie == 0 || 333 shared_meta()->queue.cookie == 0 ||
224 shared_meta()->queue.next.load() == 0) { 334 shared_meta()->queue.next.load(std::memory_order_relaxed) == 0) {
225 SetCorrupt(); 335 SetCorrupt();
226 } 336 }
227 if (!readonly) { 337 if (!readonly) {
228 // The allocator is attaching to a previously initialized segment of 338 // The allocator is attaching to a previously initialized segment of
229 // memory. Make sure the embedded data matches what has been passed. 339 // memory. Make sure the embedded data matches what has been passed.
230 if (shared_meta()->size != mem_size_ || 340 if (shared_meta()->size != mem_size_ ||
231 shared_meta()->page_size != mem_page_) { 341 shared_meta()->page_size != mem_page_) {
232 NOTREACHED(); 342 NOTREACHED();
233 SetCorrupt(); 343 SetCorrupt();
234 } 344 }
(...skipping 39 matching lines...)
274 "UMA.PersistentAllocator." + name_string + ".UsedPct", 1, 101, 21, 384 "UMA.PersistentAllocator." + name_string + ".UsedPct", 1, 101, 21,
275 HistogramBase::kUmaTargetedHistogramFlag); 385 HistogramBase::kUmaTargetedHistogramFlag);
276 386
277 DCHECK(!allocs_histogram_); 387 DCHECK(!allocs_histogram_);
278 allocs_histogram_ = Histogram::FactoryGet( 388 allocs_histogram_ = Histogram::FactoryGet(
279 "UMA.PersistentAllocator." + name_string + ".Allocs", 1, 10000, 50, 389 "UMA.PersistentAllocator." + name_string + ".Allocs", 1, 10000, 50,
280 HistogramBase::kUmaTargetedHistogramFlag); 390 HistogramBase::kUmaTargetedHistogramFlag);
281 } 391 }
282 392
283 size_t PersistentMemoryAllocator::used() const { 393 size_t PersistentMemoryAllocator::used() const {
284 return std::min(shared_meta()->freeptr.load(), mem_size_); 394 return std::min(shared_meta()->freeptr.load(std::memory_order_relaxed),
395 mem_size_);
285 } 396 }
286 397
287 size_t PersistentMemoryAllocator::GetAllocSize(Reference ref) const { 398 size_t PersistentMemoryAllocator::GetAllocSize(Reference ref) const {
288 const volatile BlockHeader* const block = GetBlock(ref, 0, 0, false, false); 399 const volatile BlockHeader* const block = GetBlock(ref, 0, 0, false, false);
289 if (!block) 400 if (!block)
290 return 0; 401 return 0;
291 uint32_t size = block->size; 402 uint32_t size = block->size;
292 // Header was verified by GetBlock() but a malicious actor could change 403 // Header was verified by GetBlock() but a malicious actor could change
293 // the value between there and here. Check it again. 404 // the value between there and here. Check it again.
294 if (size <= sizeof(BlockHeader) || ref + size > mem_size_) { 405 if (size <= sizeof(BlockHeader) || ref + size > mem_size_) {
(...skipping 52 matching lines...)
347 NOTREACHED(); 458 NOTREACHED();
348 return kReferenceNull; 459 return kReferenceNull;
349 } 460 }
350 461
351 // Get the current start of unallocated memory. Other threads may 462 // Get the current start of unallocated memory. Other threads may
352 // update this at any time and cause us to retry these operations. 463 // update this at any time and cause us to retry these operations.
353 // This value should be treated as "const" to avoid confusion through 464 // This value should be treated as "const" to avoid confusion through
354 // the code below but recognize that any failed compare-exchange operation 465 // the code below but recognize that any failed compare-exchange operation
355 // involving it will cause it to be loaded with a more recent value. The 466 // involving it will cause it to be loaded with a more recent value. The
356 // code should either exit or restart the loop in that case. 467 // code should either exit or restart the loop in that case.
357 /* const */ uint32_t freeptr = shared_meta()->freeptr.load(); 468 /* const */ uint32_t freeptr =
469 shared_meta()->freeptr.load(std::memory_order_acquire);
358 470
359 // Allocation is lockless so we do all our calculation and then, if saving 471 // Allocation is lockless so we do all our calculation and then, if saving
360 // indicates a change has occurred since we started, scrap everything and 472 // indicates a change has occurred since we started, scrap everything and
361 // start over. 473 // start over.
362 for (;;) { 474 for (;;) {
363 if (IsCorrupt()) 475 if (IsCorrupt())
364 return kReferenceNull; 476 return kReferenceNull;
365 477
366 if (freeptr + size > mem_size_) { 478 if (freeptr + size > mem_size_) {
367 SetFlag(&shared_meta()->flags, kFlagFull); 479 SetFlag(&shared_meta()->flags, kFlagFull);
(...skipping 49 matching lines...)
417 529
418 // Given that all memory was zeroed before ever being given to an instance 530 // Given that all memory was zeroed before ever being given to an instance
419 // of this class and given that we only allocate in a monotonic fashion 531 // of this class and given that we only allocate in a monotonic fashion
420 // going forward, it must be that the newly allocated block is completely 532 // going forward, it must be that the newly allocated block is completely
421 // full of zeros. If we find anything in the block header that is NOT a 533 // full of zeros. If we find anything in the block header that is NOT a
422 // zero then something must have previously run amuck through memory, 534 // zero then something must have previously run amuck through memory,
423 // writing beyond the allocated space and into unallocated space. 535 // writing beyond the allocated space and into unallocated space.
424 if (block->size != 0 || 536 if (block->size != 0 ||
425 block->cookie != kBlockCookieFree || 537 block->cookie != kBlockCookieFree ||
426 block->type_id != 0 || 538 block->type_id != 0 ||
427 block->next.load() != 0) { 539 block->next.load(std::memory_order_relaxed) != 0) {
428 SetCorrupt(); 540 SetCorrupt();
429 return kReferenceNull; 541 return kReferenceNull;
430 } 542 }
431 543
432 block->size = size; 544 block->size = size;
433 block->cookie = kBlockCookieAllocated; 545 block->cookie = kBlockCookieAllocated;
434 block->type_id = type_id; 546 block->type_id = type_id;
435 return freeptr; 547 return freeptr;
436 } 548 }
437 } 549 }
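
The elided middle of Allocate() claims space by advancing |freeptr| with a compare-exchange, as the comment above the load notes. A simplified standalone sketch of that lock-free bump-pointer pattern (the real code additionally handles alignment, page boundaries, and corruption checks; names here are hypothetical):

  #include <atomic>
  #include <cstdint>

  uint32_t BumpAllocate(std::atomic<uint32_t>* free_ptr,
                        uint32_t size,
                        uint32_t mem_size) {
    uint32_t old_free = free_ptr->load(std::memory_order_acquire);
    for (;;) {
      uint32_t new_free = old_free + size;
      if (new_free > mem_size || new_free < old_free)
        return 0;  // Out of space (or the addition overflowed).
      // On failure, |old_free| is reloaded with the current value and the
      // loop retries against the newer free pointer.
      if (free_ptr->compare_exchange_strong(old_free, new_free,
                                            std::memory_order_acq_rel,
                                            std::memory_order_acquire))
        return old_free;  // Offset of the newly claimed block.
    }
  }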
438 550
439 void PersistentMemoryAllocator::GetMemoryInfo(MemoryInfo* meminfo) const { 551 void PersistentMemoryAllocator::GetMemoryInfo(MemoryInfo* meminfo) const {
440 uint32_t remaining = std::max(mem_size_ - shared_meta()->freeptr.load(), 552 uint32_t remaining = std::max(
441 (uint32_t)sizeof(BlockHeader)); 553 mem_size_ - shared_meta()->freeptr.load(std::memory_order_relaxed),
554 (uint32_t)sizeof(BlockHeader));
442 meminfo->total = mem_size_; 555 meminfo->total = mem_size_;
443 meminfo->free = IsCorrupt() ? 0 : remaining - sizeof(BlockHeader); 556 meminfo->free = IsCorrupt() ? 0 : remaining - sizeof(BlockHeader);
444 } 557 }
445 558
446 void PersistentMemoryAllocator::MakeIterable(Reference ref) { 559 void PersistentMemoryAllocator::MakeIterable(Reference ref) {
447 DCHECK(!readonly_); 560 DCHECK(!readonly_);
448 if (IsCorrupt()) 561 if (IsCorrupt())
449 return; 562 return;
450 volatile BlockHeader* block = GetBlock(ref, 0, 0, false, false); 563 volatile BlockHeader* block = GetBlock(ref, 0, 0, false, false);
451 if (!block) // invalid reference 564 if (!block) // invalid reference
(...skipping 42 matching lines...)
494 // check for crash/kill which means that this operation may also happen 607 // check for crash/kill which means that this operation may also happen
495 // even when the other thread is in perfect working order which is what 608 // even when the other thread is in perfect working order which is what
496 // necessitates the CompareAndSwap above. 609 // necessitates the CompareAndSwap above.
497 shared_meta()->tailptr.compare_exchange_strong(tail, next, 610 shared_meta()->tailptr.compare_exchange_strong(tail, next,
498 std::memory_order_acq_rel, 611 std::memory_order_acq_rel,
499 std::memory_order_acquire); 612 std::memory_order_acquire);
500 } 613 }
501 } 614 }
502 } 615 }
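
The tail of MakeIterable() (partly elided above) finishes with a compare-exchange that swings |tailptr| forward; any thread that notices the tail lagging can perform this step, so a writer that dies mid-enqueue cannot stall other threads. A minimal sketch of that "help it along" idiom (hypothetical free function; the real code operates on the shared metadata):

  #include <atomic>
  #include <cstdint>

  void HelpAdvanceTail(std::atomic<uint32_t>* tailptr,
                       uint32_t observed_tail,
                       uint32_t observed_next) {
    // Only one helper can succeed; failure simply means another thread
    // already advanced the tail, which is the desired end state either way.
    tailptr->compare_exchange_strong(observed_tail, observed_next,
                                     std::memory_order_acq_rel,
                                     std::memory_order_acquire);
  }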
503 616
504 void PersistentMemoryAllocator::CreateIterator(Iterator* state,
505 Reference starting_after) const {
506 if (starting_after) {
507 // Ensure that the starting point is a valid, iterable block.
508 const volatile BlockHeader* block =
509 GetBlock(starting_after, 0, 0, false, false);
510 if (!block || !block->next.load()) {
511 NOTREACHED();
512 starting_after = kReferenceQueue;
513 }
514 } else {
515 // A zero beginning is really the Queue reference.
516 starting_after = kReferenceQueue;
517 }
518
519 state->last = starting_after;
520 state->niter = 0;
521 }
522
523 PersistentMemoryAllocator::Reference PersistentMemoryAllocator::GetNextIterable(
524 Iterator* state,
525 uint32_t* type_id) const {
526 const volatile BlockHeader* block = GetBlock(state->last, 0, 0, true, false);
527 if (!block) // invalid iterator state
528 return kReferenceNull;
529
530 // The compiler and CPU can freely reorder all memory accesses on which
531 // there are no dependencies. It could, for example, move the load of
532 // "freeptr" above this point because there are no explicit dependencies
533 // between it and "next". If it did, however, then another block could
534 // be queued after that but before the following load meaning there is
535 // one more queued block than the future "detect loop by having more
536 // blocks that could fit before freeptr" will allow.
537 //
538 // By "acquiring" the "next" value here, it's synchronized to the enqueue
539 // of the node which in turn is synchronized to the allocation (which sets
540 // freeptr). Thus, the scenario above cannot happen.
541 uint32_t next = block->next.load(std::memory_order_acquire);
542 block = GetBlock(next, 0, 0, false, false);
543 if (!block) // no next allocation in queue
544 return kReferenceNull;
545
546 // Memory corruption could cause a loop in the list. We need to detect
547 // that so as to not cause an infinite loop in the caller. We do this
548 // simply by making sure we don't iterate more than the absolute maximum
549 // number of allocations that could have been made. Callers are likely
550 // to loop multiple times before it is detected but at least it stops.
551 uint32_t freeptr = std::min(
552 shared_meta()->freeptr.load(std::memory_order_acquire),
553 mem_size_);
554 if (state->niter > freeptr / (sizeof(BlockHeader) + kAllocAlignment)) {
555 SetCorrupt();
556 return kReferenceNull;
557 }
558
559 state->last = next;
560 state->niter++;
561 *type_id = block->type_id;
562
563 return next;
564 }
565
566 // The "corrupted" state is held both locally and globally (shared). The 617 // The "corrupted" state is held both locally and globally (shared). The
567 // shared flag can't be trusted since a malicious actor could overwrite it. 618 // shared flag can't be trusted since a malicious actor could overwrite it.
568 // Because corruption can be detected during read-only operations such as 619 // Because corruption can be detected during read-only operations such as
569 // iteration, this method may be called by other "const" methods. In this 620 // iteration, this method may be called by other "const" methods. In this
570 // case, it's safe to discard the constness and modify the local flag and 621 // case, it's safe to discard the constness and modify the local flag and
571 // maybe even the shared flag if the underlying data isn't actually read-only. 622 // maybe even the shared flag if the underlying data isn't actually read-only.
572 void PersistentMemoryAllocator::SetCorrupt() const { 623 void PersistentMemoryAllocator::SetCorrupt() const {
573 LOG(ERROR) << "Corruption detected in shared-memory segment."; 624 LOG(ERROR) << "Corruption detected in shared-memory segment.";
574 const_cast<std::atomic<bool>*>(&corrupt_)->store(true); 625 const_cast<std::atomic<bool>*>(&corrupt_)->store(true,
626 std::memory_order_relaxed);
575 if (!readonly_) { 627 if (!readonly_) {
576 SetFlag(const_cast<volatile std::atomic<uint32_t>*>(&shared_meta()->flags), 628 SetFlag(const_cast<volatile std::atomic<uint32_t>*>(&shared_meta()->flags),
577 kFlagCorrupt); 629 kFlagCorrupt);
578 } 630 }
579 } 631 }
580 632
581 bool PersistentMemoryAllocator::IsCorrupt() const { 633 bool PersistentMemoryAllocator::IsCorrupt() const {
582 if (corrupt_.load() || CheckFlag(&shared_meta()->flags, kFlagCorrupt)) { 634 if (corrupt_.load(std::memory_order_relaxed) ||
635 CheckFlag(&shared_meta()->flags, kFlagCorrupt)) {
583 SetCorrupt(); // Make sure all indicators are set. 636 SetCorrupt(); // Make sure all indicators are set.
584 return true; 637 return true;
585 } 638 }
586 return false; 639 return false;
587 } 640 }
588 641
589 bool PersistentMemoryAllocator::IsFull() const { 642 bool PersistentMemoryAllocator::IsFull() const {
590 return CheckFlag(&shared_meta()->flags, kFlagFull); 643 return CheckFlag(&shared_meta()->flags, kFlagFull);
591 } 644 }
592 645
(...skipping 10 matching lines...)
603 if (ref % kAllocAlignment != 0) 656 if (ref % kAllocAlignment != 0)
604 return nullptr; 657 return nullptr;
605 if (ref < (queue_ok ? kReferenceQueue : sizeof(SharedMetadata))) 658 if (ref < (queue_ok ? kReferenceQueue : sizeof(SharedMetadata)))
606 return nullptr; 659 return nullptr;
607 size += sizeof(BlockHeader); 660 size += sizeof(BlockHeader);
608 if (ref + size > mem_size_) 661 if (ref + size > mem_size_)
609 return nullptr; 662 return nullptr;
610 663
611 // Validation of referenced block-header. 664 // Validation of referenced block-header.
612 if (!free_ok) { 665 if (!free_ok) {
613 uint32_t freeptr = shared_meta()->freeptr.load(); 666 uint32_t freeptr = std::min(
667 shared_meta()->freeptr.load(std::memory_order_relaxed), mem_size_);
614 if (ref + size > freeptr) 668 if (ref + size > freeptr)
615 return nullptr; 669 return nullptr;
616 const volatile BlockHeader* const block = 670 const volatile BlockHeader* const block =
617 reinterpret_cast<volatile BlockHeader*>(mem_base_ + ref); 671 reinterpret_cast<volatile BlockHeader*>(mem_base_ + ref);
618 if (block->size < size) 672 if (block->size < size)
619 return nullptr; 673 return nullptr;
674 if (ref + block->size > freeptr)
675 return nullptr;
620 if (ref != kReferenceQueue && block->cookie != kBlockCookieAllocated) 676 if (ref != kReferenceQueue && block->cookie != kBlockCookieAllocated)
621 return nullptr; 677 return nullptr;
622 if (type_id != 0 && block->type_id != type_id) 678 if (type_id != 0 && block->type_id != type_id)
623 return nullptr; 679 return nullptr;
624 } 680 }
625 681
626 // Return pointer to block data. 682 // Return pointer to block data.
627 return reinterpret_cast<const volatile BlockHeader*>(mem_base_ + ref); 683 return reinterpret_cast<const volatile BlockHeader*>(mem_base_ + ref);
628 } 684 }
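
GetBlock() validates an untrusted reference before handing back a pointer: alignment, range against the segment size and the free pointer, and header sanity, since another process could rewrite the shared memory at any moment. A standalone sketch of the kind of overflow-aware bounds check that style of validation relies on (illustrative only; not the function's exact logic, and the names are hypothetical):

  #include <cstdint>

  bool IsRefInBounds(uint32_t ref, uint32_t size,
                     uint32_t mem_size, uint32_t alignment) {
    if (ref % alignment != 0)
      return false;                    // Misaligned reference.
    if (size > mem_size || ref > mem_size - size)
      return false;                    // Bounds check without overflow.
    return true;
  }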
629 685
(...skipping 75 matching lines...)
705 761
706 FilePersistentMemoryAllocator::~FilePersistentMemoryAllocator() {} 762 FilePersistentMemoryAllocator::~FilePersistentMemoryAllocator() {}
707 763
708 // static 764 // static
709 bool FilePersistentMemoryAllocator::IsFileAcceptable( 765 bool FilePersistentMemoryAllocator::IsFileAcceptable(
710 const MemoryMappedFile& file) { 766 const MemoryMappedFile& file) {
711 return IsMemoryAcceptable(file.data(), file.length(), 0, true); 767 return IsMemoryAcceptable(file.data(), file.length(), 0, true);
712 } 768 }
713 769
714 } // namespace base 770 } // namespace base