Chromium Code Reviews
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "services/blamer/shared_memory_heap.h"

#include "base/memory/ptr_util.h"
#include "mojo/public/cpp/system/buffer.h"
#include "services/blamer/public/interfaces/shared_memory_heap_registry.mojom.h"

namespace blamer {

namespace {

// A sentinel value that indicates a process or thread is currently allocating
// a new slab.
// DO NOT SUBMIT
base::PersistentMemoryAllocator* kCurrentSlabLockSentinel =
    reinterpret_cast<base::PersistentMemoryAllocator*>(1);
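// Note: address 1 is safe for this purpose because a real
// base::PersistentMemoryAllocator is always at least pointer-aligned, so no
// valid allocator object can ever live at that address.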

}  // namespace

SharedMemoryHeap::SharedMemoryHeap(
    mojom::SharedMemoryHeapRegistryPtr heap_registry)
    : heap_registry_(std::move(heap_registry)),
      current_slab_(nullptr) {
}

SharedMemoryHeap::~SharedMemoryHeap() = default;

base::PersistentMemoryAllocator* SharedMemoryHeap::GetCurrentSlab() {
  while (true) {
    auto* slab = current_slab_.load();
    // If the sentinel value is encountered, another thread is currently
    // creating a slab. Busy-loop until it's ready.
    if (slab == kCurrentSlabLockSentinel)
      continue;
    // If nullptr is encountered, this thread is racing to be the first to
    // create a new slab.
    if (slab == nullptr)
      return CreateNewSlab(slab);
    // Otherwise, a valid slab has been created and is in use.
    return slab;
  }
}

base::PersistentMemoryAllocator* SharedMemoryHeap::CreateNewSlab(
    base::PersistentMemoryAllocator* current_slab) {
  // Only one thread gets the opportunity to create a new slab: the thread
  // that successfully sets |current_slab_| to the sentinel value.
  if (!current_slab_.compare_exchange_strong(current_slab,
                                             kCurrentSlabLockSentinel)) {
    // If another thread beat us here, wait until they've created a new
    // allocator.
    while (true) {
      auto* slab = current_slab_.load();
      if (slab != kCurrentSlabLockSentinel && slab != current_slab)
        return slab;
    }
  }

  // Only the thread that acquired the right to create a new slab makes it
  // here.

  base::PersistentMemoryAllocator* new_slab = nullptr;
  {
    // First create and map the slab of shared memory.
    mojo::ScopedSharedBufferHandle buffer =
        mojo::SharedBufferHandle::Create(kSlabSize);
    mojo::ScopedSharedBufferMapping mapping = buffer->Map(kSlabSize);

    // Build a persistent allocator over it.
    size_t page_size = 0;
    size_t slab_id = slabs_.size();
    std::unique_ptr<base::PersistentMemoryAllocator> allocator =
        base::MakeUnique<base::PersistentMemoryAllocator>(
            mapping.get(), kSlabSize, page_size, slab_id,
            "SharedMemoryAllocatorSlab", false);
    new_slab = allocator.get();

    // Add this to the list of slabs.
    Slab slab;
    slab.buffer = std::move(buffer);
    slab.mapping = std::move(mapping);
    slab.allocator = std::move(allocator);
    slabs_.push_back(std::move(slab));

Sami
2017/05/19 13:44:04
I'm wondering about re-entrancy issues here, e.g., t…

chrisha
2017/05/25 18:05:07
Yeah, fair point. Depending on what the memory-inf…
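
(One hedged illustration of the re-entrancy concern above, with made-up names: if this heap is itself used to record allocations, the internal allocations in CreateNewSlab could recurse.)

// Illustrative only; g_heap, OnHeapAlloc and ALLOCATION are hypothetical.
void OnHeapAlloc(size_t size) {
  // Record the allocation in shared memory...
  g_heap->Allocate(HeapObjectType::ALLOCATION, size);
  // ...but if this lands in CreateNewSlab(), the slabs_.push_back() and
  // Mojo calls there allocate from the normal heap, re-entering this hook.
}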

    // Register the new slab with the central service registry.
    auto& local_slab = slabs_.back();
    mojom::SharedMemoryHeapSlab shared_slab;
    shared_slab.buffer = local_slab.buffer->Clone();
    shared_slab.size = kSlabSize;
    shared_slab.id = slab_id;
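    // (No call actually hands |shared_slab| to |heap_registry_| in this
    // revision; a registration method on the mojom interface would be the
    // natural next step, but its name is not shown here.)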

  }

  // Update the current slab. This will unblock other threads waiting for a
  // new slab.
  base::PersistentMemoryAllocator* expected = kCurrentSlabLockSentinel;
  CHECK(current_slab_.compare_exchange_strong(expected, new_slab));

  return new_slab;
}

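The slab-handoff protocol above is a compact lock-free pattern: a sentinel pointer doubles as a spin lock, and compare_exchange_strong elects exactly one creator thread. A distilled, self-contained sketch of the same idea (generic names, not part of this CL):

#include <atomic>

int* const kBusy = reinterpret_cast<int*>(1);  // In-band "being created" mark.
std::atomic<int*> g_current{nullptr};

int* GetOrCreate() {
  while (true) {
    int* value = g_current.load();
    if (value == kBusy)
      continue;  // Another thread is mid-creation; spin until it publishes.
    if (value)
      return value;  // Fast path: already created.
    int* expected = nullptr;
    // Exactly one thread wins this exchange and becomes the creator.
    if (!g_current.compare_exchange_strong(expected, kBusy))
      continue;  // Lost the race; loop until the winner publishes.
    int* fresh = new int(42);  // Stand-in for the expensive slab setup.
    g_current.store(fresh);    // Publish, unblocking any spinning threads.
    return fresh;
  }
}
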
SharedMemoryHeap::LocalPointer SharedMemoryHeap::Allocate(
    HeapObjectType object_type, size_t object_size) {
  auto* slab = GetCurrentSlab();

  // Allocate the object. If it fails, get a new slab and try again.
  // TODO(chrisha): Clean up the failure path that causes slabs to be
  // allocated until memory is exhausted!
  uint32_t ref = slab->Allocate(object_size, object_type);
  if (ref == 0) {
    slab = CreateNewSlab(slab);
    ref = slab->Allocate(object_size, object_type);
  }

Sami
2017/05/19 13:44:04
CHECK(ref); ?
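
(A minimal sketch of the reviewer's suggestion; the second Allocate call is the retry-path call above, and whether to crash or fail softly depends on the TODO:)

    ref = slab->Allocate(object_size, object_type);
    // Fail fast if even a freshly created slab cannot satisfy the request,
    // rather than returning a zero (invalid) reference to the caller.
    CHECK(ref);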

  LocalPointer local_pointer = {};
  local_pointer.slab = slab;
  local_pointer.pointer.slab_id = slab->Id();
  local_pointer.pointer.slab_offset = ref;
  return local_pointer;
}

void SharedMemoryHeap::Free(HeapObjectType object_type,
                            const LocalPointer& pointer) {
  // TODO(chrisha): Create free lists, and repurpose this memory! For now,
  // simply mark it as FREED and abandon it.
  auto* slab = reinterpret_cast<base::PersistentMemoryAllocator*>(
      pointer.slab);
  uint32_t ref = pointer.pointer.slab_offset;
  slab->ChangeType(ref, HeapObjectType::FREED, object_type, false);
}
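
For orientation, a hypothetical caller of the Allocate/Free API (the THREAD_INFO type and 64-byte size are illustrative, not from this CL):

// Illustrative usage only; the object type and size are made up.
void UseHeap(SharedMemoryHeap* heap) {
  SharedMemoryHeap::LocalPointer ptr =
      heap->Allocate(HeapObjectType::THREAD_INFO, 64);
  // |ptr.pointer| (slab id + offset) is the portable, cross-process handle;
  // |ptr.slab| lets this process reach the backing allocator directly.
  heap->Free(HeapObjectType::THREAD_INFO, ptr);
}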

SharedMemoryHeap::Slab::Slab() {}

SharedMemoryHeap::Slab::Slab(Slab&& other)
    : buffer(std::move(other.buffer)),
      mapping(std::move(other.mapping)),
      allocator(std::move(other.allocator)) {
}

SharedMemoryHeap::Slab::~Slab() {}

}  // namespace blamer