Index: services/blamer/shared_memory_heap.cc
diff --git a/services/blamer/shared_memory_heap.cc b/services/blamer/shared_memory_heap.cc
new file mode 100644
index 0000000000000000000000000000000000000000..35fb099c3e66d6a0603114acbd8fc1a23d64df13
--- /dev/null
+++ b/services/blamer/shared_memory_heap.cc
@@ -0,0 +1,143 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "services/blamer/shared_memory_heap.h"
+
+#include "services/blamer/public/interfaces/shared_memory_heap_registry.mojom.h"
+
+namespace blamer {
+
+namespace {
+
+// A sentinel value that indicates a process or thread is currently allocating
+// a new slab.
+// DO NOT SUBMIT
+base::PersistentMemoryAllocator* kCurrentSlabLockSentinel =
+    reinterpret_cast<base::PersistentMemoryAllocator*>(1);
+
+}  // namespace
+
+SharedMemoryHeap::SharedMemoryHeap(
+    mojom::SharedMemoryHeapRegistryPtr heap_registry)
+    : heap_registry_(std::move(heap_registry)),
+      current_slab_(nullptr) {
+}
+
+SharedMemoryHeap::~SharedMemoryHeap() = default;
+
+base::PersistentMemoryAllocator* SharedMemoryHeap::GetCurrentSlab() {
+  while (true) {
+    auto* slab = current_slab_.load();
+    // If the sentinel value is encountered, another thread is currently
+    // creating a slab. Busy loop until it's ready.
+    if (slab == kCurrentSlabLockSentinel)
+      continue;
+    // If a nullptr is encountered then this thread is racing to be the first
+    // to create a new slab.
+    if (slab == nullptr)
+      return CreateNewSlab(slab);
+    // Otherwise, a valid slab has been created and is in use.
+    return slab;
+  }
+}
+
+base::PersistentMemoryAllocator* SharedMemoryHeap::CreateNewSlab(
+    base::PersistentMemoryAllocator* current_slab) {
+  // Only one thread gets the opportunity to create a new slab. That is the
+  // thread that successfully sets |current_slab_| to the sentinel value.
+  if (!current_slab_.compare_exchange_strong(current_slab,
+                                             kCurrentSlabLockSentinel)) {
+    // If another thread beat us here, wait until it has created a new
+    // allocator.
+    while (true) {
+      auto* slab = current_slab_.load();
+      if (slab != kCurrentSlabLockSentinel && slab != current_slab)
+        return slab;
+    }
+  }
+
+  // Only the thread that acquired the right to create a new slab makes it
+  // here.
+
+  base::PersistentMemoryAllocator* new_slab = nullptr;
+  {
+    // First create and map the slab of shared memory.
+    mojo::ScopedSharedBufferHandle buffer =
+        mojo::SharedBufferHandle::Create(kSlabSize);
+    mojo::ScopedSharedBufferMapping mapping = buffer->Map(kSlabSize);
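+    // Note: neither |buffer| nor |mapping| is checked for validity before
+    // being handed to the allocator below.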
+
+    // Build a persistent allocator over it.
+    size_t page_size = 0;
+    size_t slab_id = slabs_.size();
+    std::unique_ptr<base::PersistentMemoryAllocator> allocator =
+        base::MakeUnique<base::PersistentMemoryAllocator>(
+            mapping.get(), kSlabSize, page_size, slab_id,
+            "SharedMemoryAllocatorSlab", false);
+    new_slab = allocator.get();
+
+    // Add this to the list of slabs.
+    Slab slab;
+    slab.buffer = std::move(buffer);
+    slab.mapping = std::move(mapping);
+    slab.allocator = std::move(allocator);
+    slabs_.push_back(std::move(slab));
Sami
2017/05/19 13:44:04
I'm wondering about re-entrancy issues here, e.g., t…
chrisha
2017/05/25 18:05:07
Yeah, fair point. Depending on what the memory-inf…
+
+    // Register the new slab with the central service registry.
+    auto& local_slab = slabs_.back();
+    mojom::SharedMemoryHeapSlab shared_slab;
+    shared_slab.buffer = local_slab.buffer->Clone();
+    shared_slab.size = kSlabSize;
+    shared_slab.id = slab_id;
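+    // Note: |shared_slab| is populated here but never sent over
+    // |heap_registry_|; the registration call itself is still missing.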
+
+  }
+
+  // Update the current slab. This will unblock other threads waiting for a new
+  // slab.
+  base::PersistentMemoryAllocator* expected = kCurrentSlabLockSentinel;
+  CHECK(current_slab_.compare_exchange_strong(expected, new_slab));
+
+  return new_slab;
+}
+
+SharedMemoryHeap::LocalPointer SharedMemoryHeap::Allocate(
+    HeapObjectType object_type, size_t object_size) {
+  auto* slab = GetCurrentSlab();
+
+  // Allocate the object. If it fails, get a new slab and try again.
+  // TODO(chrisha): Clean up the failure path that causes slabs to be allocated
+  // until memory is exhausted!
+  uint32_t ref = slab->Allocate(object_size, object_type);
+  if (ref == 0) {
+    slab = CreateNewSlab(slab);
+    ref = slab->Allocate(object_size, object_type);
+  }
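+  // Note: if the second allocation also fails, |ref| stays 0 and the returned
+  // pointer will not reference a valid object.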
Sami
2017/05/19 13:44:04
CHECK(ref); ?
+
+  LocalPointer local_pointer = {};
+  local_pointer.slab = slab;
+  local_pointer.pointer.slab_id = slab->Id();
+  local_pointer.pointer.slab_offset = ref;
+  return local_pointer;
+}
+
+void SharedMemoryHeap::Free(HeapObjectType object_type,
+                            const LocalPointer& pointer) {
+  // TODO(chrisha): Create free lists, and repurpose this memory! For now,
+  // simply mark it as FREED and abandon it.
+  auto* slab = reinterpret_cast<base::PersistentMemoryAllocator*>(
+      pointer.slab);
+  uint32_t ref = pointer.pointer.slab_offset;
+  slab->ChangeType(ref, HeapObjectType::FREED, object_type, false);
+}
+
+SharedMemoryHeap::Slab::Slab() {}
+
+SharedMemoryHeap::Slab::Slab(Slab&& other)
+    : buffer(std::move(other.buffer)),
+      mapping(std::move(other.mapping)),
+      allocator(std::move(other.allocator)) {
+}
+
+SharedMemoryHeap::Slab::~Slab() {}
+
+}  // namespace blamer
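
For reference, below is a minimal, self-contained sketch of the sentinel-based
slab-creation protocol that GetCurrentSlab() and CreateNewSlab() implement,
with std::atomic<int*> and a plain int standing in for the
PersistentMemoryAllocator and the shared-memory slab. The names kSentinel,
g_current, and GetOrCreate are illustrative only and not part of this CL:

  #include <atomic>
  #include <cassert>
  #include <thread>
  #include <vector>

  namespace {

  // Address 1 is never a valid object pointer, so it doubles as a
  // "creation in progress" flag, mirroring kCurrentSlabLockSentinel.
  int* const kSentinel = reinterpret_cast<int*>(1);
  std::atomic<int*> g_current{nullptr};

  // Returns the shared object, creating it exactly once. The thread that wins
  // the compare-exchange performs the creation; every other thread spins
  // until the winner publishes the result.
  int* GetOrCreate() {
    while (true) {
      int* current = g_current.load();
      if (current == kSentinel)
        continue;  // Another thread is mid-creation; spin until it publishes.
      if (current != nullptr)
        return current;  // Already created.
      // Race to install the sentinel; a loser loops and re-reads.
      if (g_current.compare_exchange_strong(current, kSentinel)) {
        int* created = new int(42);  // Stand-in for slab construction.
        g_current.store(created);    // Publish, unblocking any spinners.
        return created;
      }
    }
  }

  }  // namespace

  int main() {
    int* results[4] = {};
    std::vector<std::thread> threads;
    for (int i = 0; i < 4; ++i)
      threads.emplace_back([&results, i] { results[i] = GetOrCreate(); });
    for (auto& t : threads)
      t.join();
    // Every thread observes the same object, no matter which one created it.
    for (int i = 1; i < 4; ++i)
      assert(results[i] == results[0]);
    return 0;
  }

The spin keeps the common lookup path lock-free; this is a reasonable trade as
long as slab creation is rare and quick, which matches how GetCurrentSlab() is
used above.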