Chromium Code Reviews

Side by Side Diff: base/metrics/persistent_memory_allocator.cc

Issue 2034813003: Make changing a record's type an atomic operation. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 4 years, 6 months ago
1 // Copyright (c) 2015 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2015 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "base/metrics/persistent_memory_allocator.h" 5 #include "base/metrics/persistent_memory_allocator.h"
6 6
7 #include <assert.h> 7 #include <assert.h>
8 #include <algorithm> 8 #include <algorithm>
9 9
10 #include "base/files/memory_mapped_file.h" 10 #include "base/files/memory_mapped_file.h"
(...skipping 60 matching lines...)
71 // padding out allocations. An alignment of 16 would ensure that the block 71 // padding out allocations. An alignment of 16 would ensure that the block
72 // header structure always sits in a single cache line. An average of about 72 // header structure always sits in a single cache line. An average of about
73 // 1/2 this value will be wasted with every allocation. 73 // 1/2 this value will be wasted with every allocation.
74 const uint32_t PersistentMemoryAllocator::kAllocAlignment = 8; 74 const uint32_t PersistentMemoryAllocator::kAllocAlignment = 8;
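The comment above describes 8-byte alignment with roughly half that value wasted per allocation. A minimal sketch of the rounding it implies, assuming the allocator pads each request up to the next multiple of kAllocAlignment (the helper name below is hypothetical, not the allocator's own code):

#include <cstdint>

constexpr uint32_t kAllocAlignment = 8;

// Round a requested size up to the next multiple of the alignment; on
// average this adds about kAllocAlignment / 2 bytes of padding per request.
uint32_t PaddedSize(uint32_t requested) {
  return (requested + kAllocAlignment - 1) & ~(kAllocAlignment - 1);
}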
75 75
76 // The block-header is placed at the top of every allocation within the 76 // The block-header is placed at the top of every allocation within the
77 // segment to describe the data that follows it. 77 // segment to describe the data that follows it.
78 struct PersistentMemoryAllocator::BlockHeader { 78 struct PersistentMemoryAllocator::BlockHeader {
79 uint32_t size; // Number of bytes in this block, including header. 79 uint32_t size; // Number of bytes in this block, including header.
80 uint32_t cookie; // Constant value indicating completed allocation. 80 uint32_t cookie; // Constant value indicating completed allocation.
81 uint32_t type_id; // A number provided by caller indicating data type. 81 std::atomic<uint32_t> type_id; // Arbitrary number indicating data type.
82 std::atomic<uint32_t> next; // Pointer to the next block when iterating. 82 std::atomic<uint32_t> next; // Pointer to the next block when iterating.
83 }; 83 };
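The key change in this struct is that type_id becomes std::atomic<uint32_t>. Because the header bytes live in a shared or persistent memory segment, the atomic wrapper must not alter the header's size or layout. A minimal sketch of that invariant, using an illustrative mirror struct rather than the real BlockHeader:

#include <atomic>
#include <cstdint>

struct BlockHeaderSketch {        // Illustrative mirror of BlockHeader.
  uint32_t size;
  uint32_t cookie;
  std::atomic<uint32_t> type_id;
  std::atomic<uint32_t> next;
};

// On the platforms Chromium targets, a lock-free std::atomic<uint32_t> is
// expected to have the same size and representation as a plain uint32_t.
static_assert(sizeof(std::atomic<uint32_t>) == sizeof(uint32_t),
              "atomic member must not change the header size");
static_assert(sizeof(BlockHeaderSketch) == 16,
              "block header should remain 16 bytes");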
84 84
85 // The shared metadata exists once at the top of the memory segment to 85 // The shared metadata exists once at the top of the memory segment to
86 // describe the state of the allocator to all processes. 86 // describe the state of the allocator to all processes.
87 struct PersistentMemoryAllocator::SharedMetadata { 87 struct PersistentMemoryAllocator::SharedMetadata {
88 uint32_t cookie; // Some value that indicates complete initialization. 88 uint32_t cookie; // Some value that indicates complete initialization.
89 uint32_t size; // Total size of memory segment. 89 uint32_t size; // Total size of memory segment.
90 uint32_t page_size; // Paging size within memory segment. 90 uint32_t page_size; // Paging size within memory segment.
91 uint32_t version; // Version code so upgrades don't break. 91 uint32_t version; // Version code so upgrades don't break.
92 uint64_t id; // Arbitrary ID number given by creator. 92 uint64_t id; // Arbitrary ID number given by creator.
(...skipping 88 matching lines...)
181 return kReferenceNull; 181 return kReferenceNull;
182 } 182 }
183 183
184 // Update the "last_record" pointer to be the reference being returned. 184 // Update the "last_record" pointer to be the reference being returned.
185 // If it fails then another thread has already iterated past it so loop 185 // If it fails then another thread has already iterated past it so loop
186 // again. Failing will also load the existing value into "last" so there 186 // again. Failing will also load the existing value into "last" so there
187 // is no need to do another such load when the while-loop restarts. A 187 // is no need to do another such load when the while-loop restarts. A
188 // "strong" compare-exchange is used because failing unnecessarily would 188 // "strong" compare-exchange is used because failing unnecessarily would
189 // mean repeating some fairly costly validations above. 189 // mean repeating some fairly costly validations above.
190 if (last_record_.compare_exchange_strong(last, next)) { 190 if (last_record_.compare_exchange_strong(last, next)) {
191 *type_return = block->type_id; 191 *type_return = block->type_id.load(std::memory_order_relaxed);
192 break; 192 break;
193 } 193 }
194 } 194 }
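The comment above explains the choice of a "strong" compare-exchange: the "weak" form may fail spuriously and is only appropriate when retrying is cheap, whereas a failure here forces the costly block validation to repeat. A generic illustration of the weak-CAS retry idiom (values are placeholders, not the iterator's logic):

#include <atomic>
#include <cstdint>

std::atomic<uint32_t> last_record{0};

void Advance(uint32_t next) {
  uint32_t expected = last_record.load(std::memory_order_relaxed);
  // Weak form: may fail even when |expected| matches, so it belongs in a
  // tight retry loop where repeating the iteration costs almost nothing.
  while (!last_record.compare_exchange_weak(expected, next)) {
    // |expected| is refreshed with the current value on each failure.
  }
}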
195 195
196 // Memory corruption could cause a loop in the list. Such must be detected 196 // Memory corruption could cause a loop in the list. Such must be detected
197 // so as to not cause an infinite loop in the caller. This is done by simply 197 // so as to not cause an infinite loop in the caller. This is done by simply
198 // making sure it doesn't iterate more times than the absolute maximum 198 // making sure it doesn't iterate more times than the absolute maximum
199 // number of allocations that could have been made. Callers are likely 199 // number of allocations that could have been made. Callers are likely
200 // to loop multiple times before it is detected but at least it stops. 200 // to loop multiple times before it is detected but at least it stops.
201 const uint32_t freeptr = std::min( 201 const uint32_t freeptr = std::min(
(...skipping 86 matching lines...)
288 shared_meta()->version != 0 || 288 shared_meta()->version != 0 ||
289 shared_meta()->freeptr.load(std::memory_order_relaxed) != 0 || 289 shared_meta()->freeptr.load(std::memory_order_relaxed) != 0 ||
290 shared_meta()->flags.load(std::memory_order_relaxed) != 0 || 290 shared_meta()->flags.load(std::memory_order_relaxed) != 0 ||
291 shared_meta()->id != 0 || 291 shared_meta()->id != 0 ||
292 shared_meta()->name != 0 || 292 shared_meta()->name != 0 ||
293 shared_meta()->tailptr != 0 || 293 shared_meta()->tailptr != 0 ||
294 shared_meta()->queue.cookie != 0 || 294 shared_meta()->queue.cookie != 0 ||
295 shared_meta()->queue.next.load(std::memory_order_relaxed) != 0 || 295 shared_meta()->queue.next.load(std::memory_order_relaxed) != 0 ||
296 first_block->size != 0 || 296 first_block->size != 0 ||
297 first_block->cookie != 0 || 297 first_block->cookie != 0 ||
298 first_block->type_id != 0 || 298 first_block->type_id.load(std::memory_order_relaxed) != 0 ||
299 first_block->next != 0) { 299 first_block->next != 0) {
300 // ...or something malicious has been playing with the metadata. 300 // ...or something malicious has been playing with the metadata.
301 SetCorrupt(); 301 SetCorrupt();
302 } 302 }
303 303
304 // This is still safe to do even if corruption has been detected. 304 // This is still safe to do even if corruption has been detected.
305 shared_meta()->cookie = kGlobalCookie; 305 shared_meta()->cookie = kGlobalCookie;
306 shared_meta()->size = mem_size_; 306 shared_meta()->size = mem_size_;
307 shared_meta()->page_size = mem_page_; 307 shared_meta()->page_size = mem_page_;
308 shared_meta()->version = kGlobalVersion; 308 shared_meta()->version = kGlobalVersion;
(...skipping 106 matching lines...)
415 SetCorrupt(); 415 SetCorrupt();
416 return 0; 416 return 0;
417 } 417 }
418 return size - sizeof(BlockHeader); 418 return size - sizeof(BlockHeader);
419 } 419 }
420 420
421 uint32_t PersistentMemoryAllocator::GetType(Reference ref) const { 421 uint32_t PersistentMemoryAllocator::GetType(Reference ref) const {
422 const volatile BlockHeader* const block = GetBlock(ref, 0, 0, false, false); 422 const volatile BlockHeader* const block = GetBlock(ref, 0, 0, false, false);
423 if (!block) 423 if (!block)
424 return 0; 424 return 0;
425 return block->type_id; 425 return block->type_id.load(std::memory_order_relaxed);
426 } 426 }
427 427
428 void PersistentMemoryAllocator::SetType(Reference ref, uint32_t type_id) { 428 bool PersistentMemoryAllocator::ChangeType(Reference ref,
429 uint32_t to_type_id,
430 uint32_t from_type_id) {
429 DCHECK(!readonly_); 431 DCHECK(!readonly_);
430 volatile BlockHeader* const block = GetBlock(ref, 0, 0, false, false); 432 volatile BlockHeader* const block = GetBlock(ref, 0, 0, false, false);
431 if (!block) 433 if (!block)
432 return; 434 return false;
433 block->type_id = type_id; 435
436 // This is a "strong" exchange because there is no loop that can retry in
437 // the wake of spurious failures possible with "weak" exchanges.
438 return block->type_id.compare_exchange_strong(from_type_id, to_type_id);
434 } 439 }
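This is the heart of the patch: SetType's unconditional store becomes a compare-and-swap, so a record's type changes only if it still holds the expected value. A sketch of how a caller might use it, assuming hypothetical type constants and an existing allocator instance:

#include <cstdint>

#include "base/metrics/persistent_memory_allocator.h"

// Hypothetical type constants; real callers define their own meanings.
constexpr uint32_t kTypeIdFree = 1;
constexpr uint32_t kTypeIdClaimed = 2;

// Exactly one of several callers racing on the same |ref| will see true
// and may then treat the record's payload as its own.
bool ClaimRecord(base::PersistentMemoryAllocator* allocator,
                 base::PersistentMemoryAllocator::Reference ref) {
  return allocator->ChangeType(ref, kTypeIdClaimed, kTypeIdFree);
}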
435 440
436 PersistentMemoryAllocator::Reference PersistentMemoryAllocator::Allocate( 441 PersistentMemoryAllocator::Reference PersistentMemoryAllocator::Allocate(
437 size_t req_size, 442 size_t req_size,
438 uint32_t type_id) { 443 uint32_t type_id) {
439 Reference ref = AllocateImpl(req_size, type_id); 444 Reference ref = AllocateImpl(req_size, type_id);
440 if (ref) { 445 if (ref) {
441 // Success: Record this allocation in usage stats (if active). 446 // Success: Record this allocation in usage stats (if active).
442 if (allocs_histogram_) 447 if (allocs_histogram_)
443 allocs_histogram_->Add(static_cast<HistogramBase::Sample>(req_size)); 448 allocs_histogram_->Add(static_cast<HistogramBase::Sample>(req_size));
(...skipping 93 matching lines...)
537 continue; 542 continue;
538 543
539 // Given that all memory was zeroed before ever being given to an instance 544 // Given that all memory was zeroed before ever being given to an instance
540 // of this class and given that we only allocate in a monotonic fashion 545 // of this class and given that we only allocate in a monotonic fashion
541 // going forward, it must be that the newly allocated block is completely 546 // going forward, it must be that the newly allocated block is completely
542 // full of zeros. If we find anything in the block header that is NOT a 547 // full of zeros. If we find anything in the block header that is NOT a
543 // zero then something must have previously run amuck through memory, 548 // zero then something must have previously run amuck through memory,
544 // writing beyond the allocated space and into unallocated space. 549 // writing beyond the allocated space and into unallocated space.
545 if (block->size != 0 || 550 if (block->size != 0 ||
546 block->cookie != kBlockCookieFree || 551 block->cookie != kBlockCookieFree ||
547 block->type_id != 0 || 552 block->type_id.load(std::memory_order_relaxed) != 0 ||
548 block->next.load(std::memory_order_relaxed) != 0) { 553 block->next.load(std::memory_order_relaxed) != 0) {
549 SetCorrupt(); 554 SetCorrupt();
550 return kReferenceNull; 555 return kReferenceNull;
551 } 556 }
552 557
553 block->size = size; 558 block->size = size;
554 block->cookie = kBlockCookieAllocated; 559 block->cookie = kBlockCookieAllocated;
555 block->type_id = type_id; 560 block->type_id.store(type_id, std::memory_order_relaxed);
556 return freeptr; 561 return freeptr;
557 } 562 }
558 } 563 }
559 564
560 void PersistentMemoryAllocator::GetMemoryInfo(MemoryInfo* meminfo) const { 565 void PersistentMemoryAllocator::GetMemoryInfo(MemoryInfo* meminfo) const {
561 uint32_t remaining = std::max( 566 uint32_t remaining = std::max(
562 mem_size_ - shared_meta()->freeptr.load(std::memory_order_relaxed), 567 mem_size_ - shared_meta()->freeptr.load(std::memory_order_relaxed),
563 (uint32_t)sizeof(BlockHeader)); 568 (uint32_t)sizeof(BlockHeader));
564 meminfo->total = mem_size_; 569 meminfo->total = mem_size_;
565 meminfo->free = IsCorrupt() ? 0 : remaining - sizeof(BlockHeader); 570 meminfo->free = IsCorrupt() ? 0 : remaining - sizeof(BlockHeader);
(...skipping 111 matching lines...)
677 if (ref + size > freeptr) 682 if (ref + size > freeptr)
678 return nullptr; 683 return nullptr;
679 const volatile BlockHeader* const block = 684 const volatile BlockHeader* const block =
680 reinterpret_cast<volatile BlockHeader*>(mem_base_ + ref); 685 reinterpret_cast<volatile BlockHeader*>(mem_base_ + ref);
681 if (block->size < size) 686 if (block->size < size)
682 return nullptr; 687 return nullptr;
683 if (ref + block->size > freeptr) 688 if (ref + block->size > freeptr)
684 return nullptr; 689 return nullptr;
685 if (ref != kReferenceQueue && block->cookie != kBlockCookieAllocated) 690 if (ref != kReferenceQueue && block->cookie != kBlockCookieAllocated)
686 return nullptr; 691 return nullptr;
687 if (type_id != 0 && block->type_id != type_id) 692 if (type_id != 0 &&
693 block->type_id.load(std::memory_order_relaxed) != type_id) {
688 return nullptr; 694 return nullptr;
695 }
689 } 696 }
690 697
691 // Return pointer to block data. 698 // Return pointer to block data.
692 return reinterpret_cast<const volatile BlockHeader*>(mem_base_ + ref); 699 return reinterpret_cast<const volatile BlockHeader*>(mem_base_ + ref);
693 } 700 }
694 701
695 const volatile void* PersistentMemoryAllocator::GetBlockData( 702 const volatile void* PersistentMemoryAllocator::GetBlockData(
696 Reference ref, 703 Reference ref,
697 uint32_t type_id, 704 uint32_t type_id,
698 uint32_t size) const { 705 uint32_t size) const {
(...skipping 74 matching lines...)
773 FilePersistentMemoryAllocator::~FilePersistentMemoryAllocator() {} 780 FilePersistentMemoryAllocator::~FilePersistentMemoryAllocator() {}
774 781
775 // static 782 // static
776 bool FilePersistentMemoryAllocator::IsFileAcceptable( 783 bool FilePersistentMemoryAllocator::IsFileAcceptable(
777 const MemoryMappedFile& file, 784 const MemoryMappedFile& file,
778 bool read_only) { 785 bool read_only) {
779 return IsMemoryAcceptable(file.data(), file.length(), 0, read_only); 786 return IsMemoryAcceptable(file.data(), file.length(), 0, read_only);
780 } 787 }
781 788
782 } // namespace base 789 } // namespace base