Index: base/metrics/persistent_memory_allocator.cc
diff --git a/base/metrics/persistent_memory_allocator.cc b/base/metrics/persistent_memory_allocator.cc
index a1a960c9bbc907601d83c461810d1c60ab0de1c8..ab3fb3476894f05681f7fefd2cb20c1044b6a7f1 100644
--- a/base/metrics/persistent_memory_allocator.cc
+++ b/base/metrics/persistent_memory_allocator.cc
@@ -119,6 +119,115 @@ const PersistentMemoryAllocator::Reference
 const PersistentMemoryAllocator::Reference
 PersistentMemoryAllocator::kReferenceNull = 0;
+PersistentMemoryAllocator::Iterator::Iterator(
+    const PersistentMemoryAllocator* allocator)
+    : allocator_(allocator), last_record_(kReferenceQueue), record_count_(0) {}
+
+PersistentMemoryAllocator::Iterator::Iterator(
+    const PersistentMemoryAllocator* allocator,
+    Reference starting_after)
+    : allocator_(allocator), last_record_(starting_after), record_count_(0) {
+  // Ensure that the starting point is a valid, iterable block (meaning it can
+  // be read and has a non-zero "next" pointer).
+  const volatile BlockHeader* block =
+      allocator_->GetBlock(starting_after, 0, 0, false, false);
+  if (!block || block->next.load() == 0) {
+    NOTREACHED();
+    last_record_ = kReferenceQueue;
+  }
+}
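
As a usage sketch, not part of the patch itself, this is how the two constructors above are meant to be used; the function name and record handling below are hypothetical, and error handling is elided:

    // Walk every iterable record once, then resume later from where the
    // first pass stopped. |allocator| must outlive the iterators.
    void VisitAllRecords(const PersistentMemoryAllocator* allocator) {
      // Default form: begin iterating at the queue head.
      PersistentMemoryAllocator::Iterator iter(allocator);
      uint32_t type_id;
      PersistentMemoryAllocator::Reference last_seen =
          PersistentMemoryAllocator::kReferenceNull;
      for (PersistentMemoryAllocator::Reference ref = iter.GetNext(&type_id);
           ref != PersistentMemoryAllocator::kReferenceNull;
           ref = iter.GetNext(&type_id)) {
        last_seen = ref;  // Process the record; remember where we stopped.
      }

      // Resuming form: the two-argument constructor checks that |last_seen|
      // is still a readable, iterable block before trusting it.
      if (last_seen != PersistentMemoryAllocator::kReferenceNull) {
        PersistentMemoryAllocator::Iterator resume(allocator, last_seen);
        // resume.GetNext(&type_id) returns only records queued afterward.
      }
    }
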
+
+PersistentMemoryAllocator::Reference
+PersistentMemoryAllocator::Iterator::GetNext(uint32_t* type_return) {
+  // Make a copy of the existing count of found-records, acquiring all changes
+  // made to the allocator, notably "freeptr" (see the comment in the loop for
+  // why the load of that value cannot be moved above this point), that
+  // occurred during any previous runs of this method, including those by
+  // parallel threads that interrupted it. This pairs with the Release at the
+  // end of this method.
+  //
+  // Otherwise, if the compiler were to arrange the two loads such that
+  // "count" was fetched _after_ "freeptr", then it would be possible for
+  // this thread to be interrupted between them while other threads performed
+  // multiple allocations, make-iterables, and iterations (with the included
+  // increment of |record_count_|), culminating in the check at the bottom
+  // mistakenly determining that a loop exists. Isn't this stuff fun?
+  uint32_t count = record_count_.load(std::memory_order_acquire);
+
+  Reference last = last_record_.load();
+  Reference next;
+  while (true) {
+    const volatile BlockHeader* block =
+        allocator_->GetBlock(last, 0, 0, true, false);
+    if (!block)  // Invalid iterator state.
+      return kReferenceNull;
+
+    // The compiler and CPU can freely reorder all memory accesses on which
+    // there are no dependencies. They could, for example, move the load of
+    // "freeptr" to above this point because there are no explicit dependencies
+    // between it and "next". If they did, however, then another block could
+    // be queued after that load but before the following one, meaning there is
+    // one more queued block than the upcoming "detect a loop by having more
+    // blocks than could fit before freeptr" check will allow.
+    //
+    // By "acquiring" the "next" value here, it's synchronized to the enqueue
+    // of the node, which in turn is synchronized to the allocation (which sets
+    // freeptr). Thus, the scenario above cannot happen.
+    next = block->next.load(std::memory_order_acquire);
+    if (next == kReferenceQueue)  // No next allocation in queue.
+      return kReferenceNull;
+    block = allocator_->GetBlock(next, 0, 0, false, false);
+    if (!block) {  // Memory is corrupt.
+      allocator_->SetCorrupt();
+      return kReferenceNull;
+    }
+
+    // Update the "last_record" pointer to be the reference being returned.
+    // If it fails then another thread has already iterated past it so loop
+    // again. Failing will also load the existing value into "last" so there
+    // is no need to do another such load when the while-loop restarts. A
+    // "strong" compare-exchange is used because failing unnecessarily would
+    // mean repeating some fairly costly validations above.
+    if (last_record_.compare_exchange_strong(last, next)) {
+      *type_return = block->type_id;
+      break;
+    }
+  }
+
+  // Memory corruption could cause a loop in the list. This must be detected
+  // so as not to cause an infinite loop in the caller, which is done by
+  // simply making sure the iterator doesn't advance more times than the
+  // absolute maximum number of allocations that could have been made.
+  // Callers are likely to loop multiple times before it is detected, but at
+  // least it stops.
+  const uint32_t freeptr = std::min(
+      allocator_->shared_meta()->freeptr.load(std::memory_order_acquire),
+      allocator_->mem_size_);
+  const uint32_t max_records =
+      freeptr / (sizeof(BlockHeader) + kAllocAlignment);
+  if (count > max_records) {
+    allocator_->SetCorrupt();
+    return kReferenceNull;
+  }
+
+  // Increment the count and release the changes made above. It pairs with
+  // the Acquire at the top of this method. Note that this operation is not
+  // strictly synchronized with fetching of the object to return, which would
+  // have to be done inside the loop and is somewhat complicated to achieve.
+  // It does not matter if it falls behind temporarily so long as it never
+  // gets ahead.
+  record_count_.fetch_add(1, std::memory_order_release);
+  return next;
+}
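
The Acquire at the top and the Release here form the standard message-passing pairing. A self-contained illustration of the guarantee being relied on, using hypothetical names rather than this class's members:

    #include <atomic>
    #include <cstdint>

    std::atomic<uint32_t> record_count{0};  // Stands in for |record_count_|.
    uint32_t published_data = 0;            // Stands in for "freeptr" etc.

    void producer() {
      published_data = 42;  // Plain write made before the release.
      // The release makes the write above visible to any thread whose
      // acquire-load observes this increment.
      record_count.fetch_add(1, std::memory_order_release);
    }

    void consumer() {
      // If this acquire observes the increment, the compiler and CPU must
      // not hoist the data read above it, so the write of 42 is visible.
      if (record_count.load(std::memory_order_acquire) == 1) {
        uint32_t value = published_data;  // Guaranteed to read 42.
        (void)value;
      }
    }
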
+
+PersistentMemoryAllocator::Reference
+PersistentMemoryAllocator::Iterator::GetNextOfType(uint32_t type_match) {
+  Reference ref;
+  uint32_t type_found;
+  while ((ref = GetNext(&type_found)) != 0) {
+    if (type_found == type_match)
+      return ref;
+  }
+  return kReferenceNull;
+}
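
And a sketch of type-filtered iteration with GetNextOfType(); the type constant below is a hypothetical caller-defined identifier, not something from this patch:

    // Hypothetical record-type identifier; real callers define their own.
    constexpr uint32_t kMyRecordType = 0xDADA1234;

    void VisitMyRecords(const PersistentMemoryAllocator* allocator) {
      PersistentMemoryAllocator::Iterator iter(allocator);
      for (PersistentMemoryAllocator::Reference ref =
               iter.GetNextOfType(kMyRecordType);
           ref != PersistentMemoryAllocator::kReferenceNull;
           ref = iter.GetNextOfType(kMyRecordType)) {
        // Resolve |ref| to memory through the allocator and process it.
      }
    }
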
 // static
 bool PersistentMemoryAllocator::IsMemoryAcceptable(const void* base,
@@ -501,68 +610,6 @@ void PersistentMemoryAllocator::MakeIterable(Reference ref) {
   }
 }
-void PersistentMemoryAllocator::CreateIterator(Iterator* state,
-                                               Reference starting_after) const {
-  if (starting_after) {
-    // Ensure that the starting point is a valid, iterable block.
-    const volatile BlockHeader* block =
-        GetBlock(starting_after, 0, 0, false, false);
-    if (!block || !block->next.load()) {
-      NOTREACHED();
-      starting_after = kReferenceQueue;
-    }
-  } else {
-    // A zero beginning is really the Queue reference.
-    starting_after = kReferenceQueue;
-  }
-
-  state->last = starting_after;
-  state->niter = 0;
-}
-
-PersistentMemoryAllocator::Reference PersistentMemoryAllocator::GetNextIterable(
-    Iterator* state,
-    uint32_t* type_id) const {
-  const volatile BlockHeader* block = GetBlock(state->last, 0, 0, true, false);
-  if (!block)  // invalid iterator state
-    return kReferenceNull;
-
-  // The compiler and CPU can freely reorder all memory accesses on which
-  // there are no dependencies. It could, for example, move the load of
-  // "freeptr" above this point because there are no explicit dependencies
-  // between it and "next". If it did, however, then another block could
-  // be queued after that but before the following load meaning there is
-  // one more queued block than the future "detect loop by having more
-  // blocks that could fit before freeptr" will allow.
-  //
-  // By "acquiring" the "next" value here, it's synchronized to the enqueue
-  // of the node which in turn is synchronized to the allocation (which sets
-  // freeptr). Thus, the scenario above cannot happen.
-  uint32_t next = block->next.load(std::memory_order_acquire);
-  block = GetBlock(next, 0, 0, false, false);
-  if (!block)  // no next allocation in queue
-    return kReferenceNull;
-
-  // Memory corruption could cause a loop in the list. We need to detect
-  // that so as to not cause an infinite loop in the caller. We do this
-  // simply by making sure we don't iterate more than the absolute maximum
-  // number of allocations that could have been made. Callers are likely
-  // to loop multiple times before it is detected but at least it stops.
-  uint32_t freeptr = std::min(
-      shared_meta()->freeptr.load(std::memory_order_acquire),
-      mem_size_);
-  if (state->niter > freeptr / (sizeof(BlockHeader) + kAllocAlignment)) {
-    SetCorrupt();
-    return kReferenceNull;
-  }
-
-  state->last = next;
-  state->niter++;
-  *type_id = block->type_id;
-
-  return next;
-}
-
 // The "corrupted" state is held both locally and globally (shared). The
 // shared flag can't be trusted since a malicious actor could overwrite it.
 // Because corruption can be detected during read-only operations such as
@@ -610,13 +657,15 @@ PersistentMemoryAllocator::GetBlock(Reference ref, uint32_t type_id,
   // Validation of referenced block-header.
   if (!free_ok) {
-    uint32_t freeptr = shared_meta()->freeptr.load();
+    uint32_t freeptr = std::min(shared_meta()->freeptr.load(), mem_size_);
     if (ref + size > freeptr)
       return nullptr;
     const volatile BlockHeader* const block =
         reinterpret_cast<volatile BlockHeader*>(mem_base_ + ref);
     if (block->size < size)
       return nullptr;
+    if (ref + block->size > freeptr)
+      return nullptr;
     if (ref != kReferenceQueue && block->cookie != kBlockCookieAllocated)
       return nullptr;
     if (type_id != 0 && block->type_id != type_id)
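
The std::min() clamp added in this hunk matters because |freeptr| lives in shared memory and may be overwritten by a hostile or corrupted process. A small sketch of the general pattern, with hypothetical names, of sanitizing an untrusted end-pointer before using it in bounds checks:

    #include <algorithm>
    #include <cstdint>

    // Returns true if [offset, offset + size) lies within the first
    // |reported_end| bytes of a |buffer_size|-byte buffer, where
    // |reported_end| is untrusted. Clamping first means a corrupt value
    // (e.g. 0xFFFFFFFF) cannot make an out-of-range block look valid, and
    // the subtraction form avoids 32-bit overflow in "offset + size".
    bool RangeIsValid(uint32_t offset, uint32_t size,
                      uint32_t reported_end, uint32_t buffer_size) {
      const uint32_t end = std::min(reported_end, buffer_size);
      return offset <= end && size <= end - offset;
    }
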