Chromium Code Reviews

Unified Diff: base/metrics/persistent_memory_allocator.cc
(context and '+' lines carry new-file line numbers; '-' lines carry old-file line numbers)

Issue 2742193002: Harden allocator for file-backed memory. (Closed)
Patch Set: fix some build problems (created 3 years, 9 months ago)
     1  // Copyright (c) 2015 The Chromium Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style license that can be
     3  // found in the LICENSE file.
     4
     5  #include "base/metrics/persistent_memory_allocator.h"
     6
     7  #include <assert.h>
     8  #include <algorithm>
     9
    10  #if defined(OS_WIN)
    11  #include "winbase.h"
    12  #elif defined(OS_POSIX)
    13  #include <sys/mman.h>
    14  #endif
    15
    16  #include "base/files/memory_mapped_file.h"
    17  #include "base/logging.h"
    18  #include "base/memory/shared_memory.h"
    19  #include "base/metrics/histogram_macros.h"
    20  #include "base/metrics/sparse_histogram.h"
+   21  #include "base/threading/thread_restrictions.h"
    22
    23  namespace {
    24
    25  // Limit of memory segment size. It has to fit in an unsigned 32-bit number
    26  // and should be a power of 2 in order to accommodate almost any page size.
    27  const uint32_t kSegmentMaxSize = 1 << 30;  // 1 GiB
    28
    29  // A constant (random) value placed in the shared metadata to identify
    30  // an already initialized memory segment.
    31  const uint32_t kGlobalCookie = 0x408305DC;
    32
    33  // The current version of the metadata. If updates are made that change
    34  // the metadata, the version number can be queried to operate in a backward-
    35  // compatible manner until the memory segment is completely re-initialized.
-   35  const uint32_t kGlobalVersion = 1;
+   36  const uint32_t kGlobalVersion = 2;
    37
    38  // Constant values placed in the block headers to indicate its state.
    39  const uint32_t kBlockCookieFree = 0;
    40  const uint32_t kBlockCookieQueue = 1;
    41  const uint32_t kBlockCookieWasted = (uint32_t)-1;
    42  const uint32_t kBlockCookieAllocated = 0xC8799269;
    43
    44  // TODO(bcwhite): When acceptable, consider moving flags to std::atomic<char>
    45  // types rather than combined bitfield.
    46
-   46  // Flags stored in the flags_ field of the SharedMetaData structure below.
+   47  // Flags stored in the flags_ field of the SharedMetadata structure below.
    48  enum : int {
    49    kFlagCorrupt = 1 << 0,
    50    kFlagFull = 1 << 1
    51  };
    52
    53  // Errors that are logged in "errors" histogram.
    54  enum AllocatorError : int {
    55    kMemoryIsCorrupt = 1,
    56  };
    57
        (...skipping 36 matching lines...)
    94  // The block-header is placed at the top of every allocation within the
    95  // segment to describe the data that follows it.
    96  struct PersistentMemoryAllocator::BlockHeader {
    97    uint32_t size;    // Number of bytes in this block, including header.
    98    uint32_t cookie;  // Constant value indicating completed allocation.
    99    std::atomic<uint32_t> type_id;  // Arbitrary number indicating data type.
   100    std::atomic<uint32_t> next;     // Pointer to the next block when iterating.
   101  };
   102
   103  // The shared metadata exists once at the top of the memory segment to
-  103  // describe the state of the allocator to all processes.
+  104  // describe the state of the allocator to all processes. The size of this
+  105  // structure must be a multiple of 64-bits to ensure compatibility between
+  106  // architectures.
Alexei Svitkine (slow) 2017/03/15 15:47:20: Can you have a static_assert about that? I know y[...]
bcwhite 2017/03/15 19:21:48: It's effectively on line 324, which ensures the same thing.
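A standalone illustration of the check being discussed (names invented; the allocator's real assert lives in the constructor, at new line 324, because SharedMetadata is a private nested type):

    #include <cstdint>

    // Hypothetical stand-in for the private SharedMetadata layout above.
    struct ExampleMetadata {
      uint32_t cookie, size, page_size, version;
      uint64_t id;
      uint32_t name, padding1;
    };

    // Fails to compile if the structure is not a whole number of 64-bit
    // words, mirroring the sizeof(...) == 64 assert in the constructor.
    static_assert(sizeof(ExampleMetadata) % sizeof(uint64_t) == 0,
                  "metadata must be a multiple of 64 bits");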
   107  struct PersistentMemoryAllocator::SharedMetadata {
   108    uint32_t cookie;     // Some value that indicates complete initialization.
   109    uint32_t size;       // Total size of memory segment.
   110    uint32_t page_size;  // Paging size within memory segment.
   111    uint32_t version;    // Version code so upgrades don't break.
   112    uint64_t id;         // Arbitrary ID number given by creator.
   113    uint32_t name;       // Reference to stored name string.
+  114    uint32_t padding1;   // Pad-out read-only data to 64-bit alignment.
   115
   116    // Above is read-only after first construction. Below may be changed and
   117    // so must be marked "volatile" to provide correct inter-process behavior.
   118
+  119    // State of the memory, plus some padding to keep alignment.
Alexei Svitkine (slow) 2017/03/15 15:47:20: What does "state of the memory" mean? Is it referring to the MemoryState enum?
bcwhite 2017/03/15 19:21:47: Yes, MemoryState in the .h file. Done.
Alexei Svitkine (slow) 2017/03/15 20:05:29: I think on all platforms Chrome supports, uint8_t should work here.
bcwhite 2017/03/16 15:53:03: Done.
+  120    volatile std::atomic<char> memory_state;
+  121    char padding2[3];
+  122
   123    // Bitfield of information flags. Access to this should be done through
   124    // the CheckFlag() and SetFlag() methods defined above.
   125    volatile std::atomic<uint32_t> flags;
   126
   127    // Offset/reference to first free space in segment.
   128    volatile std::atomic<uint32_t> freeptr;
   129
   130    // The "iterable" queue is an M&S Queue as described here, append-only:
   131    // https://www.research.ibm.com/people/m/michael/podc-1996.pdf
+  132    // |queue| needs to be 64-bit aligned and is itself a multiple of 64 bits.
   133    volatile std::atomic<uint32_t> tailptr;  // Last block of iteration queue.
   134    volatile BlockHeader queue;  // Empty block for linked-list head/tail.
   135  };
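The new memory_state field is published with the usual one-writer pattern. A minimal self-contained sketch, assuming stand-in names for the real SharedMetadata field and MemoryState enum:

    #include <atomic>
    #include <cstdint>

    // Invented stand-ins for the real SharedMetadata field and enum values.
    constexpr uint8_t kStateUninitialized = 0;
    constexpr uint8_t kStateInitialized = 1;

    struct ExampleSharedState {
      volatile std::atomic<uint8_t> memory_state;
    };

    // Writer: publish the state only after all other fields are written;
    // the release store orders those earlier writes before the state change.
    void PublishInitialized(ExampleSharedState* shared) {
      shared->memory_state.store(kStateInitialized, std::memory_order_release);
    }

    // Reader in another process: a relaxed load suffices for polling.
    bool IsInitialized(const volatile ExampleSharedState* shared) {
      return shared->memory_state.load(std::memory_order_relaxed) ==
             kStateInitialized;
    }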
   136
   137  // The "queue" block header is used to detect "last node" so that zero/null
   138  // can be used to indicate that it hasn't been added at all. It is part of
   139  // the SharedMetadata structure which itself is always located at offset zero.
   140  const PersistentMemoryAllocator::Reference
   141      PersistentMemoryAllocator::kReferenceQueue =
   142          offsetof(SharedMetadata, queue);
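For reference, the enqueue step of such an append-only queue looks roughly like this toy, single-process sketch (the offset-addressed node_at helper is invented; the real allocator operates on references into the shared segment):

    #include <atomic>
    #include <cstdint>

    struct Node {
      std::atomic<uint32_t> next;  // 0 means "no next node yet".
    };

    // node_at maps a 32-bit reference to its node; invented for this sketch.
    void Append(std::atomic<uint32_t>* tail, Node* (*node_at)(uint32_t),
                uint32_t new_ref) {
      node_at(new_ref)->next.store(0, std::memory_order_relaxed);
      for (;;) {
        uint32_t tail_ref = tail->load(std::memory_order_acquire);
        uint32_t next_ref = 0;
        // Try to hang the new node off the current last node.
        if (node_at(tail_ref)->next.compare_exchange_strong(
                next_ref, new_ref, std::memory_order_acq_rel)) {
          // Success: swing the tail pointer to the new node.
          tail->compare_exchange_strong(tail_ref, new_ref,
                                        std::memory_order_acq_rel);
          return;
        }
        // Another writer appended first; help advance the tail, then retry.
        tail->compare_exchange_strong(tail_ref, next_ref,
                                      std::memory_order_acq_rel);
      }
    }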
        (...skipping 171 matching lines...)
   314        readonly_(readonly),
   315        corrupt_(0),
   316        allocs_histogram_(nullptr),
   317        used_histogram_(nullptr),
   318        errors_histogram_(nullptr) {
   319    // These asserts ensure that the structures are 32/64-bit agnostic and meet
   320    // all the requirements of use within the allocator. They access private
   321    // definitions and so cannot be moved to the global scope.
   322    static_assert(sizeof(PersistentMemoryAllocator::BlockHeader) == 16,
   323                  "struct is not portable across different natural word widths");
-  315    static_assert(sizeof(PersistentMemoryAllocator::SharedMetadata) == 56,
+  324    static_assert(sizeof(PersistentMemoryAllocator::SharedMetadata) == 64,
   325                  "struct is not portable across different natural word widths");
   326
   327    static_assert(sizeof(BlockHeader) % kAllocAlignment == 0,
   328                  "BlockHeader is not a multiple of kAllocAlignment");
   329    static_assert(sizeof(SharedMetadata) % kAllocAlignment == 0,
   330                  "SharedMetadata is not a multiple of kAllocAlignment");
   331    static_assert(kReferenceQueue % kAllocAlignment == 0,
   332                  "\"queue\" is not aligned properly; must be at end of struct");
   333
   334    // Ensure that memory segment is of acceptable size.
        (...skipping 51 matching lines...)
   386      shared_meta()->tailptr.store(kReferenceQueue, std::memory_order_release);
   387
   388      // Allocate space for the name so other processes can learn it.
   389      if (!name.empty()) {
   390        const size_t name_length = name.length() + 1;
   391        shared_meta()->name = Allocate(name_length, 0);
   392        char* name_cstr = GetAsArray<char>(shared_meta()->name, 0, name_length);
   393        if (name_cstr)
   394          memcpy(name_cstr, name.data(), name.length());
   395      }
+  396
+  397      shared_meta()->memory_state.store(MEMORY_INITIALIZED,
+  398                                        std::memory_order_release);
   399    } else {
-  388      if (shared_meta()->size == 0 ||
+  400      if (shared_meta()->size == 0 || shared_meta()->version != kGlobalVersion ||
Alexei Svitkine (slow) 2017/03/15 15:47:20: So we would previously not use the kGlobalVersion field to validate the segment?
bcwhite 2017/03/15 19:21:48: There has only been one version before this, so no check was needed.
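Schematically, the change to version validation looks like this (a sketch, not the committed code):

    #include <cstdint>

    const uint32_t kGlobalVersion = 2;

    // Before this change: only a zero (never-written) version was rejected.
    bool VersionLooksValidOld(uint32_t stored) { return stored != 0; }

    // After: any mismatch with the compiled-in version marks the segment
    // corrupt, so a file written by an incompatible build is never trusted.
    bool VersionLooksValidNew(uint32_t stored) { return stored == kGlobalVersion; }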
-  389        shared_meta()->version == 0 ||
   401        shared_meta()->freeptr.load(std::memory_order_relaxed) == 0 ||
-  391        shared_meta()->tailptr == 0 ||
-  392        shared_meta()->queue.cookie == 0 ||
+  402        shared_meta()->tailptr == 0 || shared_meta()->queue.cookie == 0 ||
   403        shared_meta()->queue.next.load(std::memory_order_relaxed) == 0) {
   404      SetCorrupt();
   405    }
   406    if (!readonly) {
   407      // The allocator is attaching to a previously initialized segment of
   408      // memory. If the initialization parameters differ, make the best of it
   409      // by reducing the local construction parameters to match those of
   410      // the actual memory area. This ensures that the local object never
   411      // tries to write outside of the original bounds.
   412      // Because the fields are const to ensure that no code other than the
        (...skipping 60 matching lines...)
   473    used_histogram_ = LinearHistogram::FactoryGet(
   474        "UMA.PersistentAllocator." + name_string + ".UsedPct", 1, 101, 21,
   475        HistogramBase::kUmaTargetedHistogramFlag);
   476
   477    DCHECK(!errors_histogram_);
   478    errors_histogram_ = SparseHistogram::FactoryGet(
   479        "UMA.PersistentAllocator." + name_string + ".Errors",
   480        HistogramBase::kUmaTargetedHistogramFlag);
   481  }
   482
+  483  void PersistentMemoryAllocator::SetMemoryState(uint8_t memory_state) {
+  484    shared_meta()->memory_state.store(memory_state, std::memory_order_relaxed);
+  485    Flush(sizeof(SharedMetadata), false);
+  486  }
+  487
+  488  uint8_t PersistentMemoryAllocator::GetMemoryState() {
+  489    return shared_meta()->memory_state.load(std::memory_order_relaxed);
+  490  }
+  491
   492  size_t PersistentMemoryAllocator::used() const {
   493    return std::min(shared_meta()->freeptr.load(std::memory_order_relaxed),
   494                    mem_size_);
   495  }
   496
   497  PersistentMemoryAllocator::Reference PersistentMemoryAllocator::GetAsReference(
   498      const void* memory,
   499      uint32_t type_id) const {
   500    uintptr_t address = reinterpret_cast<uintptr_t>(memory);
   501    if (address < reinterpret_cast<uintptr_t>(mem_base_))
        (...skipping 328 matching lines...)
   830    if (ref < (queue_ok ? kReferenceQueue : sizeof(SharedMetadata)))
   831      return nullptr;
   832    if (ref % kAllocAlignment != 0)
   833      return nullptr;
   834    size += sizeof(BlockHeader);
   835    if (ref + size > mem_size_)
   836      return nullptr;
   837
   838    // Validation of referenced block-header.
   839    if (!free_ok) {
-  821      uint32_t freeptr = std::min(
-  822          shared_meta()->freeptr.load(std::memory_order_relaxed), mem_size_);
-  823      if (ref + size > freeptr)
-  824        return nullptr;
   840      const volatile BlockHeader* const block =
   841          reinterpret_cast<volatile BlockHeader*>(mem_base_ + ref);
+  842      if (ref != kReferenceQueue && block->cookie != kBlockCookieAllocated)
Alexei Svitkine (slow) 2017/03/15 15:47:20: Shouldn't you check ref != kReferenceQueue before dereferencing |block|?
bcwhite 2017/03/15 19:21:48: kReferenceQueue is still an offset of mem_base_, so the dereference itself is safe.
+  843        return nullptr;
   844      if (block->size < size)
   845        return nullptr;
-  829      if (ref + block->size > freeptr)
-  830        return nullptr;
-  831      if (ref != kReferenceQueue && block->cookie != kBlockCookieAllocated)
-  832        return nullptr;
+  846      if (ref + block->size > mem_size_)
+  847        return nullptr;
   848      if (type_id != 0 &&
   849          block->type_id.load(std::memory_order_relaxed) != type_id) {
   850        return nullptr;
   851      }
   852    }
   853
   854    // Return pointer to block data.
   855    return reinterpret_cast<const volatile BlockHeader*>(mem_base_ + ref);
   856  }
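A schematic reading of the reordered validation (helper and parameters invented): the cookie is now tested before any block fields are trusted, and block extents are bounded by the fixed mapping size rather than the mutable freeptr, which a corrupt file could control:

    #include <cstdint>

    const uint32_t kBlockCookieAllocated = 0xC8799269;

    // Sketch of the hardened checks; |ref| and the stored values come from
    // memory that a hostile or corrupt file could have written.
    bool BlockLooksValid(uint32_t ref, uint32_t stored_cookie,
                         uint32_t stored_size, uint32_t requested_size,
                         uint32_t mem_size, bool is_queue) {
      if (!is_queue && stored_cookie != kBlockCookieAllocated)
        return false;  // Not a completed allocation; don't trust its fields.
      if (stored_size < requested_size)
        return false;  // Block too small for the requested data.
      if (stored_size > mem_size || ref > mem_size - stored_size)
        return false;  // Block claims to extend past the mapping.
      return true;
    }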
   857
+  858  void PersistentMemoryAllocator::Flush(size_t length, bool sync) {}
Alexei Svitkine (slow) 2017/03/15 15:47:20: Nit: Maybe add a comment for why it makes sense for this to be empty in the base class.
bcwhite 2017/03/15 19:21:48: Done.
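A plausible rendering of the comment added in response (wording assumed, not the committed text):

    // Flush() is a no-op here because the base allocator operates on memory
    // with no backing store, so there is nothing to write out; subclasses
    // backed by a real file (e.g. FilePersistentMemoryAllocator below)
    // override it to push the first |length| bytes of the segment to disk.
    void PersistentMemoryAllocator::Flush(size_t length, bool sync) {}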
+  859
   860  void PersistentMemoryAllocator::RecordError(int error) const {
   861    if (errors_histogram_)
   862      errors_histogram_->Add(error);
   863  }
   864
   865  const volatile void* PersistentMemoryAllocator::GetBlockData(
   866      Reference ref,
   867      uint32_t type_id,
   868      uint32_t size) const {
   869    DCHECK(size > 0);
        (...skipping 120 matching lines...)
   990      uint64_t id,
   991      base::StringPiece name,
   992      bool read_only)
   993      : PersistentMemoryAllocator(
   994            Memory(const_cast<uint8_t*>(file->data()), MEM_FILE),
   995            max_size != 0 ? max_size : file->length(),
   996            0,
   997            id,
   998            name,
   999            read_only),
-  983        mapped_file_(std::move(file)) {}
+ 1000        mapped_file_(std::move(file)) {
+ 1001    // Ensure the disk-copy of the data reflects the fully-initialized memory as
+ 1002    // there is no guarantee as to what order the pages might be auto-flushed by
+ 1003    // the OS in the future.
+ 1004    Flush(used(), true);
+ 1005  }
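A hypothetical usage sketch showing where the new up-front flush takes effect (the file name, ID, and the MemoryMappedFile::Initialize overload are assumptions):

    #include <memory>
    #include <utility>

    #include "base/files/file_path.h"
    #include "base/files/memory_mapped_file.h"
    #include "base/metrics/persistent_memory_allocator.h"

    void MapMetricsFile() {
      auto file = std::make_unique<base::MemoryMappedFile>();
      if (!file->Initialize(base::FilePath(FILE_PATH_LITERAL("metrics.pma")),
                            base::MemoryMappedFile::READ_WRITE)) {
        return;
      }
      if (!base::FilePersistentMemoryAllocator::IsFileAcceptable(*file, false))
        return;
      // The constructor now synchronously flushes the initialized header,
      // so a crash right after this point leaves a parseable file on disk.
      base::FilePersistentMemoryAllocator allocator(
          std::move(file), /*max_size=*/0, /*id=*/0x1234, "ExampleMetrics",
          /*read_only=*/false);
    }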
  1006
  1007  FilePersistentMemoryAllocator::~FilePersistentMemoryAllocator() {}
  1008
  1009  // static
  1010  bool FilePersistentMemoryAllocator::IsFileAcceptable(
  1011      const MemoryMappedFile& file,
  1012      bool read_only) {
  1013    return IsMemoryAcceptable(file.data(), file.length(), 0, read_only);
  1014  }
+ 1015
+ 1016  void FilePersistentMemoryAllocator::Flush(size_t length, bool sync) {
+ 1017    if (sync)
+ 1018      ThreadRestrictions::AssertIOAllowed();
+ 1019
+ 1020  #if defined(OS_WIN)
+ 1021    // Windows doesn't support a synchronous flush.
+ 1022    ::FlushViewOfFile(data(), length);
+ 1023  #elif defined(OS_POSIX)
+ 1024    ::msync(const_cast<void*>(data()), length,
+ 1025            MS_INVALIDATE | (sync ? MS_SYNC : MS_ASYNC));
Alexei Svitkine (slow) 2017/03/15 15:47:20: From man msync: "The MS_ASYNC flag is not permitted to be combined with the MS_INVALIDATE flag."
bcwhite 2017/03/15 19:21:48: Where did you read that? http://man7.org/linux/man-pages/man2/msync.2.html
Alexei Svitkine (slow) 2017/03/15 20:05:29: I typed "man msync" on my Mac. So it sounds like it's a Mac-specific restriction.
bcwhite 2017/03/16 15:53:03: Done.
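One way the Mac restriction could be accommodated, as a sketch (the OS_MACOSX guard is an assumption, not necessarily the committed fix):

    #include <sys/mman.h>

    #include <cstddef>

    // Illustrative helper: flush a mapped range. On Mac, MS_ASYNC may not
    // be combined with MS_INVALIDATE, so the invalidate flag is passed only
    // where that combination is legal.
    void FlushMapping(void* data, size_t length, bool sync) {
    #if defined(OS_MACOSX)
      ::msync(data, length, sync ? (MS_SYNC | MS_INVALIDATE) : MS_ASYNC);
    #else
      ::msync(data, length, MS_INVALIDATE | (sync ? MS_SYNC : MS_ASYNC));
    #endif
    }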
+ 1026  #else
+ 1027  #error Unsupported OS.
+ 1028  #endif
+ 1029  }
  1030  #endif  // !defined(OS_NACL)
  1031
  1032  }  // namespace base
