Chromium Code Reviews

Side by Side Diff: base/metrics/persistent_memory_allocator.cc

Issue 2742193002: Harden allocator for file-backed memory. (Closed)
Patch Set: addressed final review comments (created 3 years, 9 months ago)
1 // Copyright (c) 2015 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2015 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "base/metrics/persistent_memory_allocator.h" 5 #include "base/metrics/persistent_memory_allocator.h"
6 6
7 #include <assert.h> 7 #include <assert.h>
8 #include <algorithm> 8 #include <algorithm>
9 9
10 #if defined(OS_WIN) 10 #if defined(OS_WIN)
11 #include "winbase.h" 11 #include "winbase.h"
12 #elif defined(OS_POSIX) 12 #elif defined(OS_POSIX)
13 #include <sys/mman.h> 13 #include <sys/mman.h>
14 #endif 14 #endif
15 15
16 #include "base/files/memory_mapped_file.h" 16 #include "base/files/memory_mapped_file.h"
17 #include "base/logging.h" 17 #include "base/logging.h"
18 #include "base/memory/shared_memory.h" 18 #include "base/memory/shared_memory.h"
19 #include "base/metrics/histogram_macros.h" 19 #include "base/metrics/histogram_macros.h"
20 #include "base/metrics/sparse_histogram.h" 20 #include "base/metrics/sparse_histogram.h"
21 #include "base/threading/thread_restrictions.h"
21 22
22 namespace { 23 namespace {
23 24
24 // Limit of memory segment size. It has to fit in an unsigned 32-bit number 25 // Limit of memory segment size. It has to fit in an unsigned 32-bit number
25 // and should be a power of 2 in order to accommodate almost any page size. 26 // and should be a power of 2 in order to accommodate almost any page size.
26 const uint32_t kSegmentMaxSize = 1 << 30; // 1 GiB 27 const uint32_t kSegmentMaxSize = 1 << 30; // 1 GiB
27 28
28 // A constant (random) value placed in the shared metadata to identify 29 // A constant (random) value placed in the shared metadata to identify
29 // an already initialized memory segment. 30 // an already initialized memory segment.
30 const uint32_t kGlobalCookie = 0x408305DC; 31 const uint32_t kGlobalCookie = 0x408305DC;
31 32
32 // The current version of the metadata. If updates are made that change 33 // The current version of the metadata. If updates are made that change
33 // the metadata, the version number can be queried to operate in a backward- 34 // the metadata, the version number can be queried to operate in a backward-
34 // compatible manner until the memory segment is completely re-initialized. 35 // compatible manner until the memory segment is completely re-initialized.
35 const uint32_t kGlobalVersion = 1; 36 const uint32_t kGlobalVersion = 2;
36 37
37 // Constant values placed in the block headers to indicate its state. 38 // Constant values placed in the block headers to indicate its state.
38 const uint32_t kBlockCookieFree = 0; 39 const uint32_t kBlockCookieFree = 0;
39 const uint32_t kBlockCookieQueue = 1; 40 const uint32_t kBlockCookieQueue = 1;
40 const uint32_t kBlockCookieWasted = (uint32_t)-1; 41 const uint32_t kBlockCookieWasted = (uint32_t)-1;
41 const uint32_t kBlockCookieAllocated = 0xC8799269; 42 const uint32_t kBlockCookieAllocated = 0xC8799269;
42 43
43 // TODO(bcwhite): When acceptable, consider moving flags to std::atomic<char> 44 // TODO(bcwhite): When acceptable, consider moving flags to std::atomic<char>
44 // types rather than combined bitfield. 45 // types rather than combined bitfield.
45 46
46 // Flags stored in the flags_ field of the SharedMetaData structure below. 47 // Flags stored in the flags_ field of the SharedMetadata structure below.
47 enum : int { 48 enum : int {
48 kFlagCorrupt = 1 << 0, 49 kFlagCorrupt = 1 << 0,
49 kFlagFull = 1 << 1 50 kFlagFull = 1 << 1
50 }; 51 };
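
The kFlagCorrupt and kFlagFull bits are manipulated through the CheckFlag() and SetFlag() helpers defined in the elided region below. A minimal sketch of that pattern (assumed, not the verbatim Chromium code): a compare-exchange loop so concurrent writers cannot drop each other's bits.

    #include <atomic>
    #include <stdint.h>

    // Sketch only: read one flag bit from the shared flags word.
    bool CheckFlag(const volatile std::atomic<uint32_t>* flags, int flag) {
      return (flags->load(std::memory_order_relaxed) & flag) != 0;
    }

    // Sketch only: set one flag bit without clobbering concurrent updates.
    // On failure, compare_exchange_weak reloads |loaded| with the current
    // value, so each retry operates on fresh state.
    void SetFlag(volatile std::atomic<uint32_t>* flags, int flag) {
      uint32_t loaded = flags->load(std::memory_order_relaxed);
      while (!flags->compare_exchange_weak(loaded, loaded | flag)) {
      }
    }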
51 52
52 // Errors that are logged in "errors" histogram. 53 // Errors that are logged in "errors" histogram.
53 enum AllocatorError : int { 54 enum AllocatorError : int {
54 kMemoryIsCorrupt = 1, 55 kMemoryIsCorrupt = 1,
55 }; 56 };
56 57
(...skipping 36 matching lines...)
93 // The block-header is placed at the top of every allocation within the 94 // The block-header is placed at the top of every allocation within the
94 // segment to describe the data that follows it. 95 // segment to describe the data that follows it.
95 struct PersistentMemoryAllocator::BlockHeader { 96 struct PersistentMemoryAllocator::BlockHeader {
96 uint32_t size; // Number of bytes in this block, including header. 97 uint32_t size; // Number of bytes in this block, including header.
97 uint32_t cookie; // Constant value indicating completed allocation. 98 uint32_t cookie; // Constant value indicating completed allocation.
98 std::atomic<uint32_t> type_id; // Arbitrary number indicating data type. 99 std::atomic<uint32_t> type_id; // Arbitrary number indicating data type.
99 std::atomic<uint32_t> next; // Pointer to the next block when iterating. 100 std::atomic<uint32_t> next; // Pointer to the next block when iterating.
100 }; 101 };
101 102
102 // The shared metadata exists once at the top of the memory segment to 103 // The shared metadata exists once at the top of the memory segment to
103 // describe the state of the allocator to all processes. 104 // describe the state of the allocator to all processes. The size of this
 105 // structure must be a multiple of 64 bits to ensure compatibility between
106 // architectures.
104 struct PersistentMemoryAllocator::SharedMetadata { 107 struct PersistentMemoryAllocator::SharedMetadata {
105 uint32_t cookie; // Some value that indicates complete initialization. 108 uint32_t cookie; // Some value that indicates complete initialization.
106 uint32_t size; // Total size of memory segment. 109 uint32_t size; // Total size of memory segment.
107 uint32_t page_size; // Paging size within memory segment. 110 uint32_t page_size; // Paging size within memory segment.
108 uint32_t version; // Version code so upgrades don't break. 111 uint32_t version; // Version code so upgrades don't break.
109 uint64_t id; // Arbitrary ID number given by creator. 112 uint64_t id; // Arbitrary ID number given by creator.
110 uint32_t name; // Reference to stored name string. 113 uint32_t name; // Reference to stored name string.
114 uint32_t padding1; // Pad-out read-only data to 64-bit alignment.
111 115
112 // Above is read-only after first construction. Below may be changed and 116 // Above is read-only after first construction. Below may be changed and
113 // so must be marked "volatile" to provide correct inter-process behavior. 117 // so must be marked "volatile" to provide correct inter-process behavior.
114 118
119 // State of the memory, plus some padding to keep alignment.
120 volatile std::atomic<uint8_t> memory_state; // MemoryState enum values.
121 uint8_t padding2[3];
122
115 // Bitfield of information flags. Access to this should be done through 123 // Bitfield of information flags. Access to this should be done through
116 // the CheckFlag() and SetFlag() methods defined above. 124 // the CheckFlag() and SetFlag() methods defined above.
117 volatile std::atomic<uint32_t> flags; 125 volatile std::atomic<uint32_t> flags;
118 126
119 // Offset/reference to first free space in segment. 127 // Offset/reference to first free space in segment.
120 volatile std::atomic<uint32_t> freeptr; 128 volatile std::atomic<uint32_t> freeptr;
121 129
122 // The "iterable" queue is an M&S Queue as described here, append-only: 130 // The "iterable" queue is an M&S Queue as described here, append-only:
123 // https://www.research.ibm.com/people/m/michael/podc-1996.pdf 131 // https://www.research.ibm.com/people/m/michael/podc-1996.pdf
132 // |queue| needs to be 64-bit aligned and is itself a multiple of 64 bits.
124 volatile std::atomic<uint32_t> tailptr; // Last block of iteration queue. 133 volatile std::atomic<uint32_t> tailptr; // Last block of iteration queue.
125 volatile BlockHeader queue; // Empty block for linked-list head/tail. 134 volatile BlockHeader queue; // Empty block for linked-list head/tail.
126 }; 135 };
127 136
128 // The "queue" block header is used to detect "last node" so that zero/null 137 // The "queue" block header is used to detect "last node" so that zero/null
129 // can be used to indicate that it hasn't been added at all. It is part of 138 // can be used to indicate that it hasn't been added at all. It is part of
130 // the SharedMetadata structure which itself is always located at offset zero. 139 // the SharedMetadata structure which itself is always located at offset zero.
131 const PersistentMemoryAllocator::Reference 140 const PersistentMemoryAllocator::Reference
132 PersistentMemoryAllocator::kReferenceQueue = 141 PersistentMemoryAllocator::kReferenceQueue =
133 offsetof(SharedMetadata, queue); 142 offsetof(SharedMetadata, queue);
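
The iteration queue referenced above is Michael & Scott's lock-free queue, restricted to appends. A conceptual sketch (simplified; not the actual MakeIterable() implementation) of linking a new block after the tail, with offset 0 serving as the null link:

    #include <atomic>
    #include <stdint.h>

    // References are byte offsets from the mapped segment's base address.
    struct Node {
      std::atomic<uint32_t> next{0};  // 0 == no next node
    };

    Node* NodeAt(uint8_t* base, uint32_t ref) {
      return reinterpret_cast<Node*>(base + ref);
    }

    void Enqueue(uint8_t* base, std::atomic<uint32_t>* tailptr,
                 uint32_t new_ref) {
      uint32_t tail = tailptr->load(std::memory_order_acquire);
      for (;;) {
        uint32_t expected = 0;
        if (NodeAt(base, tail)->next.compare_exchange_strong(expected,
                                                             new_ref)) {
          // Linked after the old tail; swing tailptr forward. A failed
          // exchange here means another thread already helped it along.
          tailptr->compare_exchange_strong(tail, new_ref);
          return;
        }
        // The tail already has a successor: help advance tailptr and retry.
        tailptr->compare_exchange_strong(tail, expected);
        tail = tailptr->load(std::memory_order_acquire);
      }
    }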
(...skipping 171 matching lines...)
305 readonly_(readonly), 314 readonly_(readonly),
306 corrupt_(0), 315 corrupt_(0),
307 allocs_histogram_(nullptr), 316 allocs_histogram_(nullptr),
308 used_histogram_(nullptr), 317 used_histogram_(nullptr),
309 errors_histogram_(nullptr) { 318 errors_histogram_(nullptr) {
310 // These asserts ensure that the structures are 32/64-bit agnostic and meet 319 // These asserts ensure that the structures are 32/64-bit agnostic and meet
311 // all the requirements of use within the allocator. They access private 320 // all the requirements of use within the allocator. They access private
312 // definitions and so cannot be moved to the global scope. 321 // definitions and so cannot be moved to the global scope.
313 static_assert(sizeof(PersistentMemoryAllocator::BlockHeader) == 16, 322 static_assert(sizeof(PersistentMemoryAllocator::BlockHeader) == 16,
314 "struct is not portable across different natural word widths"); 323 "struct is not portable across different natural word widths");
315 static_assert(sizeof(PersistentMemoryAllocator::SharedMetadata) == 56, 324 static_assert(sizeof(PersistentMemoryAllocator::SharedMetadata) == 64,
316 "struct is not portable across different natural word widths"); 325 "struct is not portable across different natural word widths");
317 326
318 static_assert(sizeof(BlockHeader) % kAllocAlignment == 0, 327 static_assert(sizeof(BlockHeader) % kAllocAlignment == 0,
319 "BlockHeader is not a multiple of kAllocAlignment"); 328 "BlockHeader is not a multiple of kAllocAlignment");
320 static_assert(sizeof(SharedMetadata) % kAllocAlignment == 0, 329 static_assert(sizeof(SharedMetadata) % kAllocAlignment == 0,
321 "SharedMetadata is not a multiple of kAllocAlignment"); 330 "SharedMetadata is not a multiple of kAllocAlignment");
322 static_assert(kReferenceQueue % kAllocAlignment == 0, 331 static_assert(kReferenceQueue % kAllocAlignment == 0,
323 "\"queue\" is not aligned properly; must be at end of struct"); 332 "\"queue\" is not aligned properly; must be at end of struct");
324 333
325 // Ensure that memory segment is of acceptable size. 334 // Ensure that memory segment is of acceptable size.
(...skipping 51 matching lines...)
377 shared_meta()->tailptr.store(kReferenceQueue, std::memory_order_release); 386 shared_meta()->tailptr.store(kReferenceQueue, std::memory_order_release);
378 387
379 // Allocate space for the name so other processes can learn it. 388 // Allocate space for the name so other processes can learn it.
380 if (!name.empty()) { 389 if (!name.empty()) {
381 const size_t name_length = name.length() + 1; 390 const size_t name_length = name.length() + 1;
382 shared_meta()->name = Allocate(name_length, 0); 391 shared_meta()->name = Allocate(name_length, 0);
383 char* name_cstr = GetAsArray<char>(shared_meta()->name, 0, name_length); 392 char* name_cstr = GetAsArray<char>(shared_meta()->name, 0, name_length);
384 if (name_cstr) 393 if (name_cstr)
385 memcpy(name_cstr, name.data(), name.length()); 394 memcpy(name_cstr, name.data(), name.length());
386 } 395 }
396
397 shared_meta()->memory_state.store(MEMORY_INITIALIZED,
398 std::memory_order_release);
387 } else { 399 } else {
388 if (shared_meta()->size == 0 || 400 if (shared_meta()->size == 0 || shared_meta()->version != kGlobalVersion ||
389 shared_meta()->version == 0 ||
390 shared_meta()->freeptr.load(std::memory_order_relaxed) == 0 || 401 shared_meta()->freeptr.load(std::memory_order_relaxed) == 0 ||
391 shared_meta()->tailptr == 0 || 402 shared_meta()->tailptr == 0 || shared_meta()->queue.cookie == 0 ||
392 shared_meta()->queue.cookie == 0 ||
393 shared_meta()->queue.next.load(std::memory_order_relaxed) == 0) { 403 shared_meta()->queue.next.load(std::memory_order_relaxed) == 0) {
394 SetCorrupt(); 404 SetCorrupt();
395 } 405 }
396 if (!readonly) { 406 if (!readonly) {
397 // The allocator is attaching to a previously initialized segment of 407 // The allocator is attaching to a previously initialized segment of
398 // memory. If the initialization parameters differ, make the best of it 408 // memory. If the initialization parameters differ, make the best of it
399 // by reducing the local construction parameters to match those of 409 // by reducing the local construction parameters to match those of
400 // the actual memory area. This ensures that the local object never 410 // the actual memory area. This ensures that the local object never
401 // tries to write outside of the original bounds. 411 // tries to write outside of the original bounds.
402 // Because the fields are const to ensure that no code other than the 412 // Because the fields are const to ensure that no code other than the
(...skipping 60 matching lines...)
463 used_histogram_ = LinearHistogram::FactoryGet( 473 used_histogram_ = LinearHistogram::FactoryGet(
464 "UMA.PersistentAllocator." + name_string + ".UsedPct", 1, 101, 21, 474 "UMA.PersistentAllocator." + name_string + ".UsedPct", 1, 101, 21,
465 HistogramBase::kUmaTargetedHistogramFlag); 475 HistogramBase::kUmaTargetedHistogramFlag);
466 476
467 DCHECK(!errors_histogram_); 477 DCHECK(!errors_histogram_);
468 errors_histogram_ = SparseHistogram::FactoryGet( 478 errors_histogram_ = SparseHistogram::FactoryGet(
469 "UMA.PersistentAllocator." + name_string + ".Errors", 479 "UMA.PersistentAllocator." + name_string + ".Errors",
470 HistogramBase::kUmaTargetedHistogramFlag); 480 HistogramBase::kUmaTargetedHistogramFlag);
471 } 481 }
472 482
483 void PersistentMemoryAllocator::Flush(bool sync) {
484 FlushPartial(used(), sync);
485 }
486
487 void PersistentMemoryAllocator::SetMemoryState(uint8_t memory_state) {
488 shared_meta()->memory_state.store(memory_state, std::memory_order_relaxed);
489 FlushPartial(sizeof(SharedMetadata), false);
490 }
491
492 uint8_t PersistentMemoryAllocator::GetMemoryState() const {
493 return shared_meta()->memory_state.load(std::memory_order_relaxed);
494 }
495
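
The new memory_state field gives any process that maps the same segment (including a later run reading the file) a coarse lifecycle signal. A hedged usage sketch, assuming the MemoryState constants (MEMORY_INITIALIZED, MEMORY_DELETED) declared in persistent_memory_allocator.h:

    #include "base/metrics/persistent_memory_allocator.h"

    // Sketch: before importing records written by an earlier run, confirm
    // the segment finished initialization and was never marked deleted.
    bool IsSegmentUsable(const base::PersistentMemoryAllocator& allocator) {
      return allocator.GetMemoryState() ==
             base::PersistentMemoryAllocator::MEMORY_INITIALIZED;
    }

    // Sketch: on owner teardown, tell whoever maps the file next that the
    // contents should be discarded rather than trusted.
    void MarkDeleted(base::PersistentMemoryAllocator* allocator) {
      allocator->SetMemoryState(
          base::PersistentMemoryAllocator::MEMORY_DELETED);
    }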
473 size_t PersistentMemoryAllocator::used() const { 496 size_t PersistentMemoryAllocator::used() const {
474 return std::min(shared_meta()->freeptr.load(std::memory_order_relaxed), 497 return std::min(shared_meta()->freeptr.load(std::memory_order_relaxed),
475 mem_size_); 498 mem_size_);
476 } 499 }
477 500
478 PersistentMemoryAllocator::Reference PersistentMemoryAllocator::GetAsReference( 501 PersistentMemoryAllocator::Reference PersistentMemoryAllocator::GetAsReference(
479 const void* memory, 502 const void* memory,
480 uint32_t type_id) const { 503 uint32_t type_id) const {
481 uintptr_t address = reinterpret_cast<uintptr_t>(memory); 504 uintptr_t address = reinterpret_cast<uintptr_t>(memory);
482 if (address < reinterpret_cast<uintptr_t>(mem_base_)) 505 if (address < reinterpret_cast<uintptr_t>(mem_base_))
(...skipping 317 matching lines...)
800 823
801 // Dereference a block |ref| and ensure that it's valid for the desired 824 // Dereference a block |ref| and ensure that it's valid for the desired
802 // |type_id| and |size|. |special| indicates that we may try to access block 825 // |type_id| and |size|. |special| indicates that we may try to access block
803 // headers not available to callers but still accessed by this module. By 826 // headers not available to callers but still accessed by this module. By
804 // having internal dereferences go through this same function, the allocator 827 // having internal dereferences go through this same function, the allocator
805 // is hardened against corruption. 828 // is hardened against corruption.
806 const volatile PersistentMemoryAllocator::BlockHeader* 829 const volatile PersistentMemoryAllocator::BlockHeader*
807 PersistentMemoryAllocator::GetBlock(Reference ref, uint32_t type_id, 830 PersistentMemoryAllocator::GetBlock(Reference ref, uint32_t type_id,
808 uint32_t size, bool queue_ok, 831 uint32_t size, bool queue_ok,
809 bool free_ok) const { 832 bool free_ok) const {
833 // Handle special cases.
834 if (ref == kReferenceQueue && queue_ok)
835 return reinterpret_cast<const volatile BlockHeader*>(mem_base_ + ref);
836
810 // Validation of parameters. 837 // Validation of parameters.
811 if (ref < (queue_ok ? kReferenceQueue : sizeof(SharedMetadata))) 838 if (ref < sizeof(SharedMetadata))
812 return nullptr; 839 return nullptr;
813 if (ref % kAllocAlignment != 0) 840 if (ref % kAllocAlignment != 0)
814 return nullptr; 841 return nullptr;
815 size += sizeof(BlockHeader); 842 size += sizeof(BlockHeader);
816 if (ref + size > mem_size_) 843 if (ref + size > mem_size_)
817 return nullptr; 844 return nullptr;
818 845
819 // Validation of referenced block-header. 846 // Validation of referenced block-header.
820 if (!free_ok) { 847 if (!free_ok) {
821 uint32_t freeptr = std::min(
822 shared_meta()->freeptr.load(std::memory_order_relaxed), mem_size_);
823 if (ref + size > freeptr)
824 return nullptr;
825 const volatile BlockHeader* const block = 848 const volatile BlockHeader* const block =
826 reinterpret_cast<volatile BlockHeader*>(mem_base_ + ref); 849 reinterpret_cast<volatile BlockHeader*>(mem_base_ + ref);
850 if (block->cookie != kBlockCookieAllocated)
851 return nullptr;
827 if (block->size < size) 852 if (block->size < size)
828 return nullptr; 853 return nullptr;
829 if (ref + block->size > freeptr) 854 if (ref + block->size > mem_size_)
830 return nullptr;
831 if (ref != kReferenceQueue && block->cookie != kBlockCookieAllocated)
832 return nullptr; 855 return nullptr;
833 if (type_id != 0 && 856 if (type_id != 0 &&
834 block->type_id.load(std::memory_order_relaxed) != type_id) { 857 block->type_id.load(std::memory_order_relaxed) != type_id) {
835 return nullptr; 858 return nullptr;
836 } 859 }
837 } 860 }
838 861
839 // Return pointer to block data. 862 // Return pointer to block data.
840 return reinterpret_cast<const volatile BlockHeader*>(mem_base_ + ref); 863 return reinterpret_cast<const volatile BlockHeader*>(mem_base_ + ref);
841 } 864 }
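
With the checks reordered, every header field is treated as untrusted until the cookie, size, and bounds tests pass, so a truncated or hostile file produces nullptr instead of an out-of-range read. A hedged caller-side sketch, assuming the GetAsObject<T>() accessor declared in the header (which routes through GetBlock() using T::kPersistentTypeId and T::kExpectedInstanceSize); the record type here is illustrative:

    #include <stddef.h>
    #include <stdint.h>
    #include "base/metrics/persistent_memory_allocator.h"

    // Illustrative record; the type id and size are invented for the sketch.
    struct MyRecord {
      static constexpr uint32_t kPersistentTypeId = 0x600DBEEF;
      static constexpr size_t kExpectedInstanceSize = 16;
      uint64_t first;
      uint64_t second;
    };

    uint64_t ReadFirstField(base::PersistentMemoryAllocator* allocator,
                            base::PersistentMemoryAllocator::Reference ref) {
      // A reference pulled out of file-backed memory may be garbage.
      // GetAsObject() returns nullptr for misaligned, out-of-bounds,
      // wrong-type, or never-allocated references, so always null-check.
      MyRecord* record = allocator->GetAsObject<MyRecord>(ref);
      return record ? record->first : 0;
    }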
842 865
866 void PersistentMemoryAllocator::FlushPartial(size_t length, bool sync) {
867 // Generally there is nothing to do as every write is done through volatile
868 // memory with atomic instructions to guarantee consistency. This (virtual)
 869 // method exists so that derived classes can do special things, such as
870 // tell the OS to write changes to disk now rather than when convenient.
871 }
872
843 void PersistentMemoryAllocator::RecordError(int error) const { 873 void PersistentMemoryAllocator::RecordError(int error) const {
844 if (errors_histogram_) 874 if (errors_histogram_)
845 errors_histogram_->Add(error); 875 errors_histogram_->Add(error);
846 } 876 }
847 877
848 const volatile void* PersistentMemoryAllocator::GetBlockData( 878 const volatile void* PersistentMemoryAllocator::GetBlockData(
849 Reference ref, 879 Reference ref,
850 uint32_t type_id, 880 uint32_t type_id,
851 uint32_t size) const { 881 uint32_t size) const {
852 DCHECK(size > 0); 882 DCHECK(size > 0);
(...skipping 120 matching lines...)
973 uint64_t id, 1003 uint64_t id,
974 base::StringPiece name, 1004 base::StringPiece name,
975 bool read_only) 1005 bool read_only)
976 : PersistentMemoryAllocator( 1006 : PersistentMemoryAllocator(
977 Memory(const_cast<uint8_t*>(file->data()), MEM_FILE), 1007 Memory(const_cast<uint8_t*>(file->data()), MEM_FILE),
978 max_size != 0 ? max_size : file->length(), 1008 max_size != 0 ? max_size : file->length(),
979 0, 1009 0,
980 id, 1010 id,
981 name, 1011 name,
982 read_only), 1012 read_only),
983 mapped_file_(std::move(file)) {} 1013 mapped_file_(std::move(file)) {
1014 // Ensure the disk-copy of the data reflects the fully-initialized memory as
1015 // there is no guarantee as to what order the pages might be auto-flushed by
1016 // the OS in the future.
1017 Flush(true);
1018 }
984 1019
985 FilePersistentMemoryAllocator::~FilePersistentMemoryAllocator() {} 1020 FilePersistentMemoryAllocator::~FilePersistentMemoryAllocator() {}
986 1021
987 // static 1022 // static
988 bool FilePersistentMemoryAllocator::IsFileAcceptable( 1023 bool FilePersistentMemoryAllocator::IsFileAcceptable(
989 const MemoryMappedFile& file, 1024 const MemoryMappedFile& file,
990 bool read_only) { 1025 bool read_only) {
991 return IsMemoryAcceptable(file.data(), file.length(), 0, read_only); 1026 return IsMemoryAcceptable(file.data(), file.length(), 0, read_only);
992 } 1027 }
1028
1029 void FilePersistentMemoryAllocator::FlushPartial(size_t length, bool sync) {
1030 if (sync)
1031 ThreadRestrictions::AssertIOAllowed();
1032 if (IsReadonly())
1033 return;
1034
1035 #if defined(OS_WIN)
1036 // Windows doesn't support a synchronous flush.
1037 BOOL success = ::FlushViewOfFile(data(), length);
1038 DPCHECK(success);
1039 #elif defined(OS_MACOSX)
1040 // On OSX, "invalidate" removes all cached pages, forcing a re-read from
1041 // disk. That's not applicable to "flush" so omit it.
1042 int result =
1043 ::msync(const_cast<void*>(data()), length, sync ? MS_SYNC : MS_ASYNC);
1044 DCHECK_NE(EINVAL, result);
1045 #elif defined(OS_POSIX)
1046 // On POSIX, "invalidate" forces _other_ processes to recognize what has
1047 // been written to disk and so is applicable to "flush".
1048 int result = ::msync(const_cast<void*>(data()), length,
1049 MS_INVALIDATE | (sync ? MS_SYNC : MS_ASYNC));
1050 DCHECK_NE(EINVAL, result);
1051 #else
1052 #error Unsupported OS.
1053 #endif
1054 }
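
An end-to-end sketch of the file-backed path (path handling, sizes, and names are illustrative, and the MemoryMappedFile region/access arguments are assumed from base/files/memory_mapped_file.h), showing where an explicit synchronous Flush() fits once records have been written:

    #include <memory>
    #include <utility>
    #include "base/files/file.h"
    #include "base/files/file_path.h"
    #include "base/files/memory_mapped_file.h"
    #include "base/metrics/persistent_memory_allocator.h"

    void WriteMetricsFile(const base::FilePath& path, size_t file_size) {
      std::unique_ptr<base::MemoryMappedFile> mmfile(
          new base::MemoryMappedFile());
      base::MemoryMappedFile::Region region;
      region.offset = 0;
      region.size = file_size;
      if (!mmfile->Initialize(
              base::File(path, base::File::FLAG_CREATE_ALWAYS |
                                   base::File::FLAG_READ |
                                   base::File::FLAG_WRITE),
              region, base::MemoryMappedFile::READ_WRITE_EXTEND)) {
        return;
      }
      if (!base::FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile,
                                                                 false))
        return;

      base::FilePersistentMemoryAllocator allocator(
          std::move(mmfile), file_size, /*id=*/0x1234, "ExampleMetrics",
          /*read_only=*/false);
      // ... Allocate() and fill records here ...

      // The OS writes dirty pages back at its own pace; force a synchronous
      // flush before another process might read the file.
      allocator.Flush(/*sync=*/true);
    }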
993 #endif // !defined(OS_NACL) 1055 #endif // !defined(OS_NACL)
994 1056
995 } // namespace base 1057 } // namespace base