OLD | NEW |
1 // Copyright (c) 2015 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2015 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "base/metrics/persistent_memory_allocator.h" | 5 #include "base/metrics/persistent_memory_allocator.h" |
6 | 6 |
7 #include <assert.h> | 7 #include <assert.h> |
8 #include <algorithm> | 8 #include <algorithm> |
9 | 9 |
10 #if defined(OS_WIN) | 10 #if defined(OS_WIN) |
11 #include "winbase.h" | 11 #include "winbase.h" |
12 #elif defined(OS_POSIX) | 12 #elif defined(OS_POSIX) |
13 #include <sys/mman.h> | 13 #include <sys/mman.h> |
14 #endif | 14 #endif |
15 | 15 |
16 #include "base/files/memory_mapped_file.h" | 16 #include "base/files/memory_mapped_file.h" |
17 #include "base/logging.h" | 17 #include "base/logging.h" |
18 #include "base/memory/shared_memory.h" | 18 #include "base/memory/shared_memory.h" |
19 #include "base/metrics/histogram_macros.h" | 19 #include "base/metrics/histogram_macros.h" |
| 20 #include "base/metrics/sparse_histogram.h" |
20 | 21 |
21 namespace { | 22 namespace { |
22 | 23 |
23 // Limit of memory segment size. It has to fit in an unsigned 32-bit number | 24 // Limit of memory segment size. It has to fit in an unsigned 32-bit number |
24 // and should be a power of 2 in order to accommodate almost any page size. | 25 // and should be a power of 2 in order to accommodate almost any page size. |
25 const uint32_t kSegmentMaxSize = 1 << 30; // 1 GiB | 26 const uint32_t kSegmentMaxSize = 1 << 30; // 1 GiB |
26 | 27 |
27 // A constant (random) value placed in the shared metadata to identify | 28 // A constant (random) value placed in the shared metadata to identify |
28 // an already initialized memory segment. | 29 // an already initialized memory segment. |
29 const uint32_t kGlobalCookie = 0x408305DC; | 30 const uint32_t kGlobalCookie = 0x408305DC; |
(...skipping 210 matching lines...)
240 bool PersistentMemoryAllocator::IsMemoryAcceptable(const void* base, | 241 bool PersistentMemoryAllocator::IsMemoryAcceptable(const void* base, |
241 size_t size, | 242 size_t size, |
242 size_t page_size, | 243 size_t page_size, |
243 bool readonly) { | 244 bool readonly) { |
244 return ((base && reinterpret_cast<uintptr_t>(base) % kAllocAlignment == 0) && | 245 return ((base && reinterpret_cast<uintptr_t>(base) % kAllocAlignment == 0) && |
245 (size >= sizeof(SharedMetadata) && size <= kSegmentMaxSize) && | 246 (size >= sizeof(SharedMetadata) && size <= kSegmentMaxSize) && |
246 (size % kAllocAlignment == 0 || readonly) && | 247 (size % kAllocAlignment == 0 || readonly) && |
247 (page_size == 0 || size % page_size == 0 || readonly)); | 248 (page_size == 0 || size % page_size == 0 || readonly)); |
248 } | 249 } |
249 | 250 |
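For reference, the check above accepts a segment only if the base pointer is non-null and aligned to kAllocAlignment, the size lies between sizeof(SharedMetadata) and 1 GiB, and, for writable segments, the size is also a multiple of the alignment and of the page size. A minimal standalone restatement with illustrative calls (kAllocAlignment and SharedMetadata are defined in the header, which is not part of this hunk, so the concrete values below are assumptions):

    #include <cstddef>
    #include <cstdint>

    // Assumed stand-ins for the header-side constants.
    constexpr size_t kAllocAlignment = 8;
    constexpr size_t kMinSegmentSize = 64;         // stand-in for sizeof(SharedMetadata)
    constexpr uint32_t kSegmentMaxSize = 1 << 30;  // 1 GiB

    bool IsAcceptable(const void* base, size_t size, size_t page_size,
                      bool readonly) {
      return (base && reinterpret_cast<uintptr_t>(base) % kAllocAlignment == 0) &&
             (size >= kMinSegmentSize && size <= kSegmentMaxSize) &&
             (size % kAllocAlignment == 0 || readonly) &&
             (page_size == 0 || size % page_size == 0 || readonly);
    }

    // With the assumed constants:
    // IsAcceptable(aligned, 1 << 20, 4096, false)       -> true
    // IsAcceptable(aligned, (1 << 20) + 1, 4096, false) -> false (not a multiple)
    // IsAcceptable(aligned, (1 << 20) + 1, 4096, true)  -> true  (read-only relaxes the multiples)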
250 PersistentMemoryAllocator::PersistentMemoryAllocator( | 251 PersistentMemoryAllocator::PersistentMemoryAllocator(void* base, |
251 void* base, | 252 size_t size, |
252 size_t size, | 253 size_t page_size, |
253 size_t page_size, | 254 uint64_t id, |
254 uint64_t id, | 255 base::StringPiece name, |
255 base::StringPiece name, | 256 bool readonly) |
256 bool readonly) | 257 : PersistentMemoryAllocator(Memory(base, MEM_EXTERNAL), |
257 : mem_base_(static_cast<char*>(base)), | 258 size, |
| 259 page_size, |
| 260 id, |
| 261 name, |
| 262 readonly) {} |
| 263 |
| 264 PersistentMemoryAllocator::PersistentMemoryAllocator(Memory memory, |
| 265 size_t size, |
| 266 size_t page_size, |
| 267 uint64_t id, |
| 268 base::StringPiece name, |
| 269 bool readonly) |
| 270 : mem_base_(static_cast<char*>(memory.base)), |
| 271 mem_type_(memory.type), |
258 mem_size_(static_cast<uint32_t>(size)), | 272 mem_size_(static_cast<uint32_t>(size)), |
259 mem_page_(static_cast<uint32_t>((page_size ? page_size : size))), | 273 mem_page_(static_cast<uint32_t>((page_size ? page_size : size))), |
260 readonly_(readonly), | 274 readonly_(readonly), |
261 corrupt_(0), | 275 corrupt_(0), |
262 allocs_histogram_(nullptr), | 276 allocs_histogram_(nullptr), |
263 used_histogram_(nullptr) { | 277 used_histogram_(nullptr) { |
264 static_assert(sizeof(BlockHeader) % kAllocAlignment == 0, | 278 static_assert(sizeof(BlockHeader) % kAllocAlignment == 0, |
265 "BlockHeader is not a multiple of kAllocAlignment"); | 279 "BlockHeader is not a multiple of kAllocAlignment"); |
266 static_assert(sizeof(SharedMetadata) % kAllocAlignment == 0, | 280 static_assert(sizeof(SharedMetadata) % kAllocAlignment == 0, |
267 "SharedMetadata is not a multiple of kAllocAlignment"); | 281 "SharedMetadata is not a multiple of kAllocAlignment"); |
268 static_assert(kReferenceQueue % kAllocAlignment == 0, | 282 static_assert(kReferenceQueue % kAllocAlignment == 0, |
269 "\"queue\" is not aligned properly; must be at end of struct"); | 283 "\"queue\" is not aligned properly; must be at end of struct"); |
270 | 284 |
271 // Ensure that memory segment is of acceptable size. | 285 // Ensure that memory segment is of acceptable size. |
272 CHECK(IsMemoryAcceptable(base, size, page_size, readonly)); | 286 CHECK(IsMemoryAcceptable(memory.base, size, page_size, readonly)); |
273 | 287 |
274 // These atomics operate inter-process and so must be lock-free. The local | 288 // These atomics operate inter-process and so must be lock-free. The local |
275 // casts are to make sure it can be evaluated at compile time to a constant. | 289 // casts are to make sure it can be evaluated at compile time to a constant. |
276 CHECK(((SharedMetadata*)0)->freeptr.is_lock_free()); | 290 CHECK(((SharedMetadata*)0)->freeptr.is_lock_free()); |
277 CHECK(((SharedMetadata*)0)->flags.is_lock_free()); | 291 CHECK(((SharedMetadata*)0)->flags.is_lock_free()); |
278 CHECK(((BlockHeader*)0)->next.is_lock_free()); | 292 CHECK(((BlockHeader*)0)->next.is_lock_free()); |
279 CHECK(corrupt_.is_lock_free()); | 293 CHECK(corrupt_.is_lock_free()); |
280 | 294 |
281 if (shared_meta()->cookie != kGlobalCookie) { | 295 if (shared_meta()->cookie != kGlobalCookie) { |
282 if (readonly) { | 296 if (readonly) { |
(...skipping 65 matching lines...)
348 // Because the fields are const to ensure that no code other than the | 362 // Because the fields are const to ensure that no code other than the |
349 // constructor makes changes to them as well as to give optimization | 363 // constructor makes changes to them as well as to give optimization |
350 // hints to the compiler, it's necessary to const-cast them for changes | 364 // hints to the compiler, it's necessary to const-cast them for changes |
351 // here. | 365 // here. |
352 if (shared_meta()->size < mem_size_) | 366 if (shared_meta()->size < mem_size_) |
353 *const_cast<uint32_t*>(&mem_size_) = shared_meta()->size; | 367 *const_cast<uint32_t*>(&mem_size_) = shared_meta()->size; |
354 if (shared_meta()->page_size < mem_page_) | 368 if (shared_meta()->page_size < mem_page_) |
355 *const_cast<uint32_t*>(&mem_page_) = shared_meta()->page_size; | 369 *const_cast<uint32_t*>(&mem_page_) = shared_meta()->page_size; |
356 | 370 |
357 // Ensure that settings are still valid after the above adjustments. | 371 // Ensure that settings are still valid after the above adjustments. |
358 if (!IsMemoryAcceptable(base, mem_size_, mem_page_, readonly)) | 372 if (!IsMemoryAcceptable(memory.base, mem_size_, mem_page_, readonly)) |
359 SetCorrupt(); | 373 SetCorrupt(); |
360 } | 374 } |
361 } | 375 } |
362 } | 376 } |
363 | 377 |
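The new delegating constructor takes a Memory descriptor instead of a bare pointer so the allocator can record how its segment was obtained (mem_type_). The matching header change is not shown in this hunk; judging only from how Memory and MemoryType are used in this file, a plausible sketch of those declarations is the following (an assumption, not the actual header):

    // Sketch only; the real declarations live in persistent_memory_allocator.h.
    enum MemoryType {
      MEM_EXTERNAL,  // caller-owned memory (the bare-pointer constructor)
      MEM_MALLOC,    // heap block from malloc(), released with free()
      MEM_VIRTUAL,   // VirtualAlloc()/mmap() pages, released per-platform
      MEM_SHARED,    // a base::SharedMemory mapping
      MEM_FILE,      // a base::MemoryMappedFile mapping
    };

    struct Memory {
      Memory(void* b, MemoryType t) : base(b), type(t) {}
      void* base;
      MemoryType type;
    };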
364 PersistentMemoryAllocator::~PersistentMemoryAllocator() { | 378 PersistentMemoryAllocator::~PersistentMemoryAllocator() { |
365 // It's strictly forbidden to do any memory access here in case there is | 379 // It's strictly forbidden to do any memory access here in case there is |
366 // some issue with the underlying memory segment. The "Local" allocator | 380 // some issue with the underlying memory segment. The "Local" allocator |
367 // makes use of this to allow deletion of the segment on the heap from | 381 // makes use of this to allow deletion of the segment on the heap from |
368 // within its destructor. | 382 // within its destructor. |
(...skipping 363 matching lines...)
732 //----- LocalPersistentMemoryAllocator ----------------------------------------- | 746 //----- LocalPersistentMemoryAllocator ----------------------------------------- |
733 | 747 |
734 LocalPersistentMemoryAllocator::LocalPersistentMemoryAllocator( | 748 LocalPersistentMemoryAllocator::LocalPersistentMemoryAllocator( |
735 size_t size, | 749 size_t size, |
736 uint64_t id, | 750 uint64_t id, |
737 base::StringPiece name) | 751 base::StringPiece name) |
738 : PersistentMemoryAllocator(AllocateLocalMemory(size), | 752 : PersistentMemoryAllocator(AllocateLocalMemory(size), |
739 size, 0, id, name, false) {} | 753 size, 0, id, name, false) {} |
740 | 754 |
741 LocalPersistentMemoryAllocator::~LocalPersistentMemoryAllocator() { | 755 LocalPersistentMemoryAllocator::~LocalPersistentMemoryAllocator() { |
742 DeallocateLocalMemory(const_cast<char*>(mem_base_), mem_size_); | 756 DeallocateLocalMemory(const_cast<char*>(mem_base_), mem_size_, mem_type_); |
743 } | 757 } |
744 | 758 |
745 // static | 759 // static |
746 void* LocalPersistentMemoryAllocator::AllocateLocalMemory(size_t size) { | 760 PersistentMemoryAllocator::Memory |
| 761 LocalPersistentMemoryAllocator::AllocateLocalMemory(size_t size) { |
| 762 void* address; |
| 763 |
747 #if defined(OS_WIN) | 764 #if defined(OS_WIN) |
748 void* address = | 765 address = |
749 ::VirtualAlloc(nullptr, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE); | 766 ::VirtualAlloc(nullptr, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE); |
750 DPCHECK(address); | 767 if (address) |
751 return address; | 768 return Memory(address, MEM_VIRTUAL); |
| 769 UMA_HISTOGRAM_SPARSE_SLOWLY("UMA.LocalPersistentMemoryAllocator.Failures.Win", |
| 770 ::GetLastError()); |
752 #elif defined(OS_POSIX) | 771 #elif defined(OS_POSIX) |
753 // MAP_ANON is deprecated on Linux but MAP_ANONYMOUS is not universal on Mac. | 772 // MAP_ANON is deprecated on Linux but MAP_ANONYMOUS is not universal on Mac. |
754 // MAP_SHARED is not available on Linux <2.4 but required on Mac. | 773 // MAP_SHARED is not available on Linux <2.4 but required on Mac. |
755 void* address = ::mmap(nullptr, size, PROT_READ | PROT_WRITE, | 774 address = ::mmap(nullptr, size, PROT_READ | PROT_WRITE, |
756 MAP_ANON | MAP_SHARED, -1, 0); | 775 MAP_ANON | MAP_SHARED, -1, 0); |
757 DPCHECK(MAP_FAILED != address); | 776 if (address != MAP_FAILED) |
758 return address; | 777 return Memory(address, MEM_VIRTUAL); |
| 778 UMA_HISTOGRAM_SPARSE_SLOWLY( |
| 779 "UMA.LocalPersistentMemoryAllocator.Failures.Posix", errno); |
759 #else | 780 #else |
760 #error This architecture is not (yet) supported. | 781 #error This architecture is not (yet) supported. |
761 #endif | 782 #endif |
| 783 |
| 784 // As a last resort, just allocate the memory from the heap. This will |
| 785 // achieve the same basic result but the acquired memory has to be |
| 786 // explicitly zeroed and thus realized immediately (i.e. all pages are |
| 787 // added to the process now instead of only when first accessed). |
| 788 address = malloc(size); |
| 789 DPCHECK(address); |
| 790 memset(address, 0, size); |
| 791 return Memory(address, MEM_MALLOC); |
762 } | 792 } |
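The malloc() fallback has to zero the block itself: VirtualAlloc() and anonymous mmap() hand back zero-filled pages, and the allocator depends on a fresh segment reading back as zeros so that the cookie check in the constructor (cookie != kGlobalCookie) treats it as uninitialized. A self-contained sketch of the same try-platform-then-heap pattern, POSIX side only, with a hypothetical helper name:

    #include <sys/mman.h>
    #include <cstddef>
    #include <cstdlib>
    #include <cstring>
    #include <utility>

    enum class Backing { kVirtual, kMalloc };

    // Hypothetical helper mirroring AllocateLocalMemory: prefer zero-filled
    // anonymous pages; fall back to an explicitly zeroed heap block.
    std::pair<void*, Backing> AllocateZeroedSegment(size_t size) {
      void* address = ::mmap(nullptr, size, PROT_READ | PROT_WRITE,
                             MAP_ANON | MAP_SHARED, -1, 0);
      if (address != MAP_FAILED)
        return {address, Backing::kVirtual};  // pages arrive zero-filled

      address = malloc(size);      // last resort: heap allocation
      if (address)
        memset(address, 0, size);  // must be zeroed explicitly
      return {address, Backing::kMalloc};
    }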
763 | 793 |
764 // static | 794 // static |
765 void LocalPersistentMemoryAllocator::DeallocateLocalMemory(void* memory, | 795 void LocalPersistentMemoryAllocator::DeallocateLocalMemory(void* memory, |
766 size_t size) { | 796 size_t size, |
| 797 MemoryType type) { |
| 798 if (type == MEM_MALLOC) { |
| 799 free(memory); |
| 800 return; |
| 801 } |
| 802 |
| 803 DCHECK_EQ(MEM_VIRTUAL, type); |
767 #if defined(OS_WIN) | 804 #if defined(OS_WIN) |
768 BOOL success = ::VirtualFree(memory, 0, MEM_DECOMMIT); | 805 BOOL success = ::VirtualFree(memory, 0, MEM_DECOMMIT); |
769 DPCHECK(success); | 806 DCHECK(success); |
770 #elif defined(OS_POSIX) | 807 #elif defined(OS_POSIX) |
771 int result = ::munmap(memory, size); | 808 int result = ::munmap(memory, size); |
772 DPCHECK(0 == result); | 809 DCHECK_EQ(0, result); |
773 #else | 810 #else |
774 #error This architecture is not (yet) supported. | 811 #error This architecture is not (yet) supported. |
775 #endif | 812 #endif |
776 } | 813 } |
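Putting the two halves together, a hypothetical use of the local allocator (the identifiers and sizes below are illustrative; only the constructor/destructor behavior visible in this hunk is assumed):

    #include "base/metrics/persistent_memory_allocator.h"

    void Example() {
      // The constructor calls AllocateLocalMemory(): VirtualAlloc()/mmap()
      // when possible (MEM_VIRTUAL), otherwise a zeroed malloc() block
      // (MEM_MALLOC), and records the result in mem_type_.
      base::LocalPersistentMemoryAllocator allocator(
          1 << 20 /* size: 1 MiB */, 0x1234 /* id */, "ExampleAllocator");

      // ... hand |allocator| to code that stores persistent objects in it ...

      // On scope exit the destructor routes the release through
      // DeallocateLocalMemory(mem_base_, mem_size_, mem_type_), so the heap
      // fallback is freed with free() and virtual memory is decommitted.
    }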
777 | 814 |
778 | 815 |
779 //----- SharedPersistentMemoryAllocator ---------------------------------------- | 816 //----- SharedPersistentMemoryAllocator ---------------------------------------- |
780 | 817 |
781 SharedPersistentMemoryAllocator::SharedPersistentMemoryAllocator( | 818 SharedPersistentMemoryAllocator::SharedPersistentMemoryAllocator( |
782 std::unique_ptr<SharedMemory> memory, | 819 std::unique_ptr<SharedMemory> memory, |
783 uint64_t id, | 820 uint64_t id, |
784 base::StringPiece name, | 821 base::StringPiece name, |
785 bool read_only) | 822 bool read_only) |
786 : PersistentMemoryAllocator(static_cast<uint8_t*>(memory->memory()), | 823 : PersistentMemoryAllocator( |
787 memory->mapped_size(), | 824 Memory(static_cast<uint8_t*>(memory->memory()), MEM_SHARED), |
788 0, | 825 memory->mapped_size(), |
789 id, | 826 0, |
790 name, | 827 id, |
791 read_only), | 828 name, |
| 829 read_only), |
792 shared_memory_(std::move(memory)) {} | 830 shared_memory_(std::move(memory)) {} |
793 | 831 |
794 SharedPersistentMemoryAllocator::~SharedPersistentMemoryAllocator() {} | 832 SharedPersistentMemoryAllocator::~SharedPersistentMemoryAllocator() {} |
795 | 833 |
796 // static | 834 // static |
797 bool SharedPersistentMemoryAllocator::IsSharedMemoryAcceptable( | 835 bool SharedPersistentMemoryAllocator::IsSharedMemoryAcceptable( |
798 const SharedMemory& memory) { | 836 const SharedMemory& memory) { |
799 return IsMemoryAcceptable(memory.memory(), memory.mapped_size(), 0, false); | 837 return IsMemoryAcceptable(memory.memory(), memory.mapped_size(), 0, false); |
800 } | 838 } |
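A hypothetical setup for the shared-memory flavor, assuming base::SharedMemory::CreateAndMapAnonymous() for the mapping (names and sizes are illustrative):

    #include <memory>
    #include <utility>

    #include "base/memory/shared_memory.h"
    #include "base/metrics/persistent_memory_allocator.h"

    void ExampleShared() {
      std::unique_ptr<base::SharedMemory> shm(new base::SharedMemory());
      if (!shm->CreateAndMapAnonymous(1 << 20) ||
          !base::SharedPersistentMemoryAllocator::IsSharedMemoryAcceptable(*shm))
        return;

      // The wrapper passes MEM_SHARED to the base class and keeps the
      // SharedMemory alive for as long as the allocator exists.
      base::SharedPersistentMemoryAllocator allocator(
          std::move(shm), 0x1234 /* id */, "ExampleShared", /*read_only=*/false);
    }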
801 | 839 |
802 | 840 |
803 #if !defined(OS_NACL) | 841 #if !defined(OS_NACL) |
804 //----- FilePersistentMemoryAllocator ------------------------------------------ | 842 //----- FilePersistentMemoryAllocator ------------------------------------------ |
805 | 843 |
806 FilePersistentMemoryAllocator::FilePersistentMemoryAllocator( | 844 FilePersistentMemoryAllocator::FilePersistentMemoryAllocator( |
807 std::unique_ptr<MemoryMappedFile> file, | 845 std::unique_ptr<MemoryMappedFile> file, |
808 size_t max_size, | 846 size_t max_size, |
809 uint64_t id, | 847 uint64_t id, |
810 base::StringPiece name, | 848 base::StringPiece name, |
811 bool read_only) | 849 bool read_only) |
812 : PersistentMemoryAllocator(const_cast<uint8_t*>(file->data()), | 850 : PersistentMemoryAllocator( |
813 max_size != 0 ? max_size : file->length(), | 851 Memory(const_cast<uint8_t*>(file->data()), MEM_FILE), |
814 0, | 852 max_size != 0 ? max_size : file->length(), |
815 id, | 853 0, |
816 name, | 854 id, |
817 read_only), | 855 name, |
| 856 read_only), |
818 mapped_file_(std::move(file)) {} | 857 mapped_file_(std::move(file)) {} |
819 | 858 |
820 FilePersistentMemoryAllocator::~FilePersistentMemoryAllocator() {} | 859 FilePersistentMemoryAllocator::~FilePersistentMemoryAllocator() {} |
821 | 860 |
822 // static | 861 // static |
823 bool FilePersistentMemoryAllocator::IsFileAcceptable( | 862 bool FilePersistentMemoryAllocator::IsFileAcceptable( |
824 const MemoryMappedFile& file, | 863 const MemoryMappedFile& file, |
825 bool read_only) { | 864 bool read_only) { |
826 return IsMemoryAcceptable(file.data(), file.length(), 0, read_only); | 865 return IsMemoryAcceptable(file.data(), file.length(), 0, read_only); |
827 } | 866 } |
828 #endif // !defined(OS_NACL) | 867 #endif // !defined(OS_NACL) |
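Similarly for the file-backed flavor, a hypothetical read-only use that maps an existing file with base::MemoryMappedFile::Initialize() and validates it before wrapping (the file name and id are illustrative):

    #include <memory>
    #include <utility>

    #include "base/files/file_path.h"
    #include "base/files/memory_mapped_file.h"
    #include "base/metrics/persistent_memory_allocator.h"

    void ExampleFile() {
      std::unique_ptr<base::MemoryMappedFile> mapped(new base::MemoryMappedFile());
      if (!mapped->Initialize(base::FilePath(FILE_PATH_LITERAL("metrics.pma"))) ||
          !base::FilePersistentMemoryAllocator::IsFileAcceptable(*mapped, true))
        return;

      // max_size == 0 means "use the mapped file's length"; the wrapper tags
      // the memory as MEM_FILE and keeps the mapping alive.
      base::FilePersistentMemoryAllocator allocator(
          std::move(mapped), 0 /* max_size */, 0x5678 /* id */, "ExampleFile",
          /*read_only=*/true);
    }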
829 | 868 |
830 } // namespace base | 869 } // namespace base |