Chromium Code Reviews

Diff: base/metrics/persistent_memory_allocator.cc

Issue 1803253002: Improved iterator for persistent memory allocator. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@refactor-hp
Patch Set: rebased and fixed up a bit Created 4 years, 9 months ago

 // Copyright (c) 2015 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "base/metrics/persistent_memory_allocator.h"

 #include <assert.h>
 #include <algorithm>

 #include "base/files/memory_mapped_file.h"
(...skipping 101 matching lines...)

 // The "queue" block header is used to detect "last node" so that zero/null
 // can be used to indicate that it hasn't been added at all. It is part of
 // the SharedMetadata structure which itself is always located at offset zero.
 const PersistentMemoryAllocator::Reference
     PersistentMemoryAllocator::kReferenceQueue =
         offsetof(SharedMetadata, queue);
 const PersistentMemoryAllocator::Reference
     PersistentMemoryAllocator::kReferenceNull = 0;

+PersistentMemoryAllocator::Iterator::Iterator(
+    const PersistentMemoryAllocator* allocator)
+    : allocator_(allocator), last_record_(kReferenceQueue), record_count_(0) {}
+
+PersistentMemoryAllocator::Iterator::Iterator(
+    const PersistentMemoryAllocator* allocator,
+    Reference starting_after)
+    : allocator_(allocator), last_record_(starting_after), record_count_(0) {
+  // Ensure that the starting point is a valid, iterable block (meaning it can
+  // be read and has a non-zero "next" pointer).
+  const volatile BlockHeader* block =
+      allocator_->GetBlock(starting_after, 0, 0, false, false);
+  if (!block || block->next.load() == 0) {
+    NOTREACHED();
+    last_record_ = kReferenceQueue;
Ilya Sherman 2016/03/24 02:03:58 FWIW, NOTREACHED() and DCHECK failures should be u
bcwhite 2016/03/24 14:22:34 In a properly running instance, this should never
+  }
+}
+
+PersistentMemoryAllocator::Reference
+PersistentMemoryAllocator::Iterator::GetNext(uint32_t* type_return) {
+  // Make a copy of the existing count of found-records, acquiring all changes
+  // made to the allocator, notably "freeptr" (see comment in loop for why
+  // the load of that value cannot be moved above here) that occurred during
+  // any previous runs of this method, including those by parallel threads
+  // that interrupted it. It pairs with the Release at the end of this method.
+  //
+  // Otherwise, if the compiler were to arrange the two loads such that
+  // "count" was fetched _after_ "freeptr" then it would be possible for
+  // this thread to be interrupted between them while other threads perform
+  // multiple allocations, make-iterables, and iterations (with the included
+  // increment of |record_count_|), culminating in the check at the bottom
+  // mistakenly determining that a loop exists. Isn't this stuff fun?
+  uint32_t count = record_count_.load(std::memory_order_acquire);
+
+  Reference last = last_record_.load();
+  Reference next;
+  while (true) {
+    const volatile BlockHeader* block =
+        allocator_->GetBlock(last, 0, 0, true, false);
+    if (!block)  // Invalid iterator state.
+      return kReferenceNull;
+
+    // The compiler and CPU can freely reorder all memory accesses on which
+    // there are no dependencies. They could, for example, move the load of
+    // "freeptr" to above this point because there are no explicit dependencies
+    // between it and "next". If they did, however, then another block could
+    // be queued after that early load but before the following one, meaning
+    // there would be one more queued block than the later "detect a loop by
+    // having more blocks than could fit before freeptr" check will allow.
+    //
+    // By "acquiring" the "next" value here, it's synchronized to the enqueue
+    // of the node which in turn is synchronized to the allocation (which sets
+    // freeptr). Thus, the scenario above cannot happen.
+    next = block->next.load(std::memory_order_acquire);
+    if (next == kReferenceQueue)  // No next allocation in queue.
+      return kReferenceNull;
+    block = allocator_->GetBlock(next, 0, 0, false, false);
+    if (!block) {  // Memory is corrupt.
+      allocator_->SetCorrupt();
+      return kReferenceNull;
+    }
+
+    // Update the "last_record" pointer to be the reference being returned.
+    // If the update fails then another thread has already iterated past it,
+    // so loop again. Failing will also load the existing value into "last" so
+    // there is no need to do another such load when the while-loop restarts.
+    // A "strong" compare-exchange is used because failing unnecessarily would
+    // mean repeating some fairly costly validations above.
+    if (last_record_.compare_exchange_strong(last, next)) {
+      *type_return = block->type_id;
+      break;
+    }
+  }
+
+  // Memory corruption could cause a loop in the list. This must be detected
+  // so as not to cause an infinite loop in the caller. It is done by simply
+  // making sure the iteration doesn't run more times than the absolute
+  // maximum number of allocations that could have been made. Callers are
+  // likely to loop multiple times before the corruption is detected, but at
+  // least it stops.
+  const uint32_t freeptr = std::min(
+      allocator_->shared_meta()->freeptr.load(std::memory_order_acquire),
+      allocator_->mem_size_);
+  const uint32_t max_records =
+      freeptr / (sizeof(BlockHeader) + kAllocAlignment);
+  if (count > max_records) {
+    allocator_->SetCorrupt();
+    return kReferenceNull;
+  }
+
+  // Increment the count and release the changes made above. It pairs with
+  // the Acquire at the top of this method. Note that this operation is not
+  // strictly synchronized with fetching of the object to return, which would
+  // have to be done inside the loop and is somewhat complicated to achieve.
+  // It does not matter if it falls behind temporarily so long as it never
+  // gets ahead.
+  record_count_.fetch_add(1, std::memory_order_release);
+  return next;
+}
Ilya Sherman 2016/03/24 02:03:58 Sorry, I don't feel qualified to review this code
bcwhite 2016/03/24 14:22:34 Will do.
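For readers less familiar with C++ memory ordering: GetNext() relies on the standard acquire/release pairing, where the acquire load of |record_count_| at the top observes everything published by the release fetch_add at the bottom of an earlier call. A minimal standalone sketch of that pairing, with illustrative names that are not part of this patch:

    #include <assert.h>
    #include <stdint.h>
    #include <atomic>
    #include <thread>

    // Plain data published to another thread via an atomic counter.
    uint32_t data = 0;
    std::atomic<uint32_t> counter{0};

    void Producer() {
      data = 42;                                        // This write...
      counter.fetch_add(1, std::memory_order_release);  // ...is published here.
    }

    void Consumer() {
      // If the acquire load observes the increment, the write to |data| made
      // before the matching release is guaranteed to be visible as well.
      if (counter.load(std::memory_order_acquire) == 1)
        assert(data == 42);
    }

    int main() {
      std::thread t1(Producer);
      std::thread t2(Consumer);
      t1.join();
      t2.join();
      return 0;
    }

If the acquire load does not observe the release, nothing is promised about |data|; likewise GetNext() tolerates a count that temporarily lags so long as it never gets ahead.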
+
+PersistentMemoryAllocator::Reference
+PersistentMemoryAllocator::Iterator::GetNextOfType(uint32_t type_match) {
+  Reference ref;
+  uint32_t type_found;
+  while ((ref = GetNext(&type_found)) != 0) {
+    if (type_found == type_match)
+      return ref;
+  }
+  return kReferenceNull;
+}
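For context, here is a hedged sketch of how the new iterator is meant to be consumed, based only on the signatures introduced in this patch; DumpMyRecords and kMyRecordType are hypothetical names:

    #include <stdint.h>

    #include "base/metrics/persistent_memory_allocator.h"

    // Hypothetical type identifier, used only for this example.
    constexpr uint32_t kMyRecordType = 0x12345678;

    void DumpMyRecords(const base::PersistentMemoryAllocator* allocator) {
      // Walk every iterable record, checking its type as we go.
      base::PersistentMemoryAllocator::Iterator iter(allocator);
      uint32_t type_id = 0;
      for (base::PersistentMemoryAllocator::Reference ref = iter.GetNext(&type_id);
           ref != base::PersistentMemoryAllocator::kReferenceNull;
           ref = iter.GetNext(&type_id)) {
        if (type_id == kMyRecordType) {
          // A typed pointer to the record could be obtained from |ref| here.
        }
      }

      // Or let the iterator do the filtering:
      base::PersistentMemoryAllocator::Iterator typed_iter(allocator);
      base::PersistentMemoryAllocator::Reference ref;
      while ((ref = typed_iter.GetNextOfType(kMyRecordType)) !=
             base::PersistentMemoryAllocator::kReferenceNull) {
        // Process |ref|.
      }
    }

Either form terminates with kReferenceNull at the end of the queue or on detected corruption, per GetNext() above.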

 // static
 bool PersistentMemoryAllocator::IsMemoryAcceptable(const void* base,
                                                    size_t size,
                                                    size_t page_size,
                                                    bool readonly) {
   return ((base && reinterpret_cast<uintptr_t>(base) % kAllocAlignment == 0) &&
           (size >= sizeof(SharedMetadata) && size <= kSegmentMaxSize) &&
           (size >= kSegmentMinSize || readonly) &&
           (size % kAllocAlignment == 0 || readonly) &&
(...skipping 362 matching lines...)
       // check for crash/kill which means that this operation may also happen
       // even when the other thread is in perfect working order which is what
       // necessitates the CompareAndSwap above.
       shared_meta()->tailptr.compare_exchange_strong(tail, next,
                                                      std::memory_order_acq_rel,
                                                      std::memory_order_acquire);
     }
   }
 }

-void PersistentMemoryAllocator::CreateIterator(Iterator* state,
-                                               Reference starting_after) const {
-  if (starting_after) {
-    // Ensure that the starting point is a valid, iterable block.
-    const volatile BlockHeader* block =
-        GetBlock(starting_after, 0, 0, false, false);
-    if (!block || !block->next.load()) {
-      NOTREACHED();
-      starting_after = kReferenceQueue;
-    }
-  } else {
-    // A zero beginning is really the Queue reference.
-    starting_after = kReferenceQueue;
-  }
-
-  state->last = starting_after;
-  state->niter = 0;
-}
-
-PersistentMemoryAllocator::Reference PersistentMemoryAllocator::GetNextIterable(
-    Iterator* state,
-    uint32_t* type_id) const {
-  const volatile BlockHeader* block = GetBlock(state->last, 0, 0, true, false);
-  if (!block)  // invalid iterator state
-    return kReferenceNull;
-
-  // The compiler and CPU can freely reorder all memory accesses on which
-  // there are no dependencies. It could, for example, move the load of
-  // "freeptr" above this point because there are no explicit dependencies
-  // between it and "next". If it did, however, then another block could
-  // be queued after that but before the following load meaning there is
-  // one more queued block than the future "detect loop by having more
-  // blocks that could fit before freeptr" will allow.
-  //
-  // By "acquiring" the "next" value here, it's synchronized to the enqueue
-  // of the node which in turn is synchronized to the allocation (which sets
-  // freeptr). Thus, the scenario above cannot happen.
-  uint32_t next = block->next.load(std::memory_order_acquire);
-  block = GetBlock(next, 0, 0, false, false);
-  if (!block)  // no next allocation in queue
-    return kReferenceNull;
-
-  // Memory corruption could cause a loop in the list. We need to detect
-  // that so as to not cause an infinite loop in the caller. We do this
-  // simply by making sure we don't iterate more than the absolute maximum
-  // number of allocations that could have been made. Callers are likely
-  // to loop multiple times before it is detected but at least it stops.
-  uint32_t freeptr = std::min(
-      shared_meta()->freeptr.load(std::memory_order_acquire),
-      mem_size_);
-  if (state->niter > freeptr / (sizeof(BlockHeader) + kAllocAlignment)) {
-    SetCorrupt();
-    return kReferenceNull;
-  }
-
-  state->last = next;
-  state->niter++;
-  *type_id = block->type_id;
-
-  return next;
-}
-
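For comparison, the deleted functions above were driven externally, roughly as in this sketch inferred from their signatures; DumpAllRecordsOldStyle is an illustrative name, not code from the tree:

    #include <stdint.h>

    #include "base/metrics/persistent_memory_allocator.h"

    // Illustrative walk over all iterable records with the old, removed API.
    void DumpAllRecordsOldStyle(const base::PersistentMemoryAllocator* allocator) {
      base::PersistentMemoryAllocator::Iterator state;
      allocator->CreateIterator(&state, /*starting_after=*/0);  // 0 = from start.
      uint32_t type_id = 0;
      for (base::PersistentMemoryAllocator::Reference ref =
               allocator->GetNextIterable(&state, &type_id);
           ref != base::PersistentMemoryAllocator::kReferenceNull;
           ref = allocator->GetNextIterable(&state, &type_id)) {
        // Each |ref| is one iterable record; |type_id| is its type.
      }
    }

The new Iterator object replaces the caller-managed |state| struct and performs the same validation and loop detection internally.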
 // The "corrupted" state is held both locally and globally (shared). The
 // shared flag can't be trusted since a malicious actor could overwrite it.
 // Because corruption can be detected during read-only operations such as
 // iteration, this method may be called by other "const" methods. In this
 // case, it's safe to discard the constness and modify the local flag and
 // maybe even the shared flag if the underlying data isn't actually read-only.
 void PersistentMemoryAllocator::SetCorrupt() const {
   LOG(ERROR) << "Corruption detected in shared-memory segment.";
   const_cast<std::atomic<bool>*>(&corrupt_)->store(true);
   if (!readonly_) {
(...skipping 27 matching lines...)
   if (ref % kAllocAlignment != 0)
     return nullptr;
   if (ref < (queue_ok ? kReferenceQueue : sizeof(SharedMetadata)))
     return nullptr;
   size += sizeof(BlockHeader);
   if (ref + size > mem_size_)
     return nullptr;

   // Validation of referenced block-header.
   if (!free_ok) {
-    uint32_t freeptr = shared_meta()->freeptr.load();
+    uint32_t freeptr = std::min(shared_meta()->freeptr.load(), mem_size_);
     if (ref + size > freeptr)
       return nullptr;
     const volatile BlockHeader* const block =
         reinterpret_cast<volatile BlockHeader*>(mem_base_ + ref);
     if (block->size < size)
       return nullptr;
+    if (ref + block->size > freeptr)
+      return nullptr;
     if (ref != kReferenceQueue && block->cookie != kBlockCookieAllocated)
       return nullptr;
     if (type_id != 0 && block->type_id != type_id)
       return nullptr;
   }

   // Return pointer to block data.
   return reinterpret_cast<const volatile BlockHeader*>(mem_base_ + ref);
 }
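The tightened validation above clamps the stored freeptr to the mapped size and additionally rejects a block whose recorded size extends past the allocated region. A minimal standalone sketch of that bound, with illustrative names that are not the patch's code:

    #include <stdint.h>
    #include <algorithm>

    // Never trust a stored freeptr beyond the mapped size, and reject a block
    // whose recorded size runs past the allocated region.
    bool IsBlockWithinAllocatedRegion(uint32_t ref,
                                      uint32_t block_size,
                                      uint32_t stored_freeptr,
                                      uint32_t mem_size) {
      const uint32_t freeptr = std::min(stored_freeptr, mem_size);
      if (ref >= freeptr)
        return false;
      // Same bound as "ref + block_size > freeptr", written as a subtraction
      // so the standalone sketch cannot wrap around uint32_t.
      return block_size <= freeptr - ref;
    }

The subtraction form is used here only to keep the sketch overflow-safe; the in-place checks in the patch are shown verbatim above.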

(...skipping 67 matching lines...)

 FilePersistentMemoryAllocator::~FilePersistentMemoryAllocator() {}

 // static
 bool FilePersistentMemoryAllocator::IsFileAcceptable(
     const MemoryMappedFile& file) {
   return IsMemoryAcceptable(file.data(), file.length(), 0, true);
 }

 }  // namespace base
