Chromium Code Reviews

Unified Diff: base/memory/shared_memory_allocator.cc

Issue 1410213004: Create "persistent memory allocator" for persisting and sharing objects. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 5 years, 2 months ago
Index: base/memory/shared_memory_allocator.cc
diff --git a/base/memory/shared_memory_allocator.cc b/base/memory/shared_memory_allocator.cc
new file mode 100644
index 0000000000000000000000000000000000000000..cc75805a4c30f4fe65fa4c68fbf71bead16069bd
--- /dev/null
+++ b/base/memory/shared_memory_allocator.cc
@@ -0,0 +1,328 @@
+// Copyright (c) 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/shared_memory_allocator.h"
+
+#include <assert.h>
+
+#include "base/atomicops.h"
+#include "base/logging.h"
+
+// All allocations and data-structures must be aligned to this byte boundary.
+#define ALLOC_ALIGNMENT 16
chrisha 2015/10/29 21:05:13 Windows only provides 8-byte alignment... and a tr
bcwhite 2015/10/29 23:40:57 It shouldn't be 4 because then 64-bit values could
chrisha 2015/10/30 14:36:46 8 sgtm
bcwhite 2015/10/30 15:15:35 Done.
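Following the resolution above, a minimal sketch of what the agreed change would presumably look like; the value 8 and the alignment guard are assumptions drawn from this thread, not part of the patch set shown:

    // All allocations and data-structures must be aligned to this byte boundary.
    #define ALLOC_ALIGNMENT 8
    // Assumed guard: 8 still keeps 64-bit fields (e.g. subtle::Atomic64)
    // naturally aligned, which is the concern raised in the thread above.
    static_assert(ALLOC_ALIGNMENT % alignof(int64_t) == 0,
                  "ALLOC_ALIGNMENT too small for 64-bit values");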
+
+// The equivalent of NULL for an offset.
+#define OFFSET_NULL 0
+
+namespace {
+
+// A constant (random) value placed in the shared meta-data to identify
+// an already initialized memory segment.
+const int32 GLOBAL_COOKIE = 0x408305DC;
+
+// The current version of the meta-data. If updates are made that change
+// the meta-data, the version number can be queried to operate in a backward-
+// compatible manner until the memory segment is completely re-initialized.
+const int32 GLOBAL_VERSION = 1;
+
+// Constant values placed in block headers to indicate their state.
+const int32 BLOCK_COOKIE_FREE = 0;
+const int32 BLOCK_COOKIE_QUEUE = 1;
+const int32 BLOCK_COOKIE_WASTED = -1;
+const int32 BLOCK_COOKIE_ALLOCATED = 0xC8799269;
chrisha 2015/10/29 21:05:13 For all of these constants the more common style i
bcwhite 2015/10/29 23:40:57 Done.
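For reference, a sketch of these constants in Chromium's kConstantName style, which appears to be what the comment above refers to (renames only; the values are unchanged from the patch):

    const int32 kGlobalCookie = 0x408305DC;
    const int32 kGlobalVersion = 1;
    const int32 kBlockCookieFree = 0;
    const int32 kBlockCookieQueue = 1;
    const int32 kBlockCookieWasted = -1;
    const int32 kBlockCookieAllocated = 0xC8799269;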
+
+} // namespace
+
+namespace base {
+
+struct SharedMemoryAllocator::BlockHeader {
chrisha 2015/10/29 21:05:14 Description of this struct.
bcwhite 2015/10/29 23:40:57 Done.
+ int32 size; // number of bytes in this block, including header
chrisha 2015/10/29 21:05:13 micronit: Comments should be full sentences. Numb
bcwhite 2015/10/29 23:40:57 That's technically a sentence fragment. I can mak
+ int32 cookie; // constant value indicating completed allocation
+ int32 type; // a number provided by caller indicating data type
chrisha 2015/10/29 21:05:13 Is this needed?
bcwhite 2015/10/29 23:40:57 I think so, yes. When allocating, the caller know
chrisha 2015/10/30 14:36:46 Many heap implementations are iterable. They don't
+ subtle::Atomic32 next; // pointer to the next block when iterating
+};
+
+struct SharedMemoryAllocator::SharedMetaData {
chrisha 2015/10/29 21:05:13 Ditto.
bcwhite 2015/10/29 23:40:57 Done.
+ int32 cookie; // some value that indicates complete initialization
+ int32 size; // total size of memory segment
+ int32 version; // version code so upgrades don't break
+ subtle::Atomic32 freeptr; // offset to first free space in the segment
+ int32 reserved[2];
chrisha 2015/10/29 21:05:13 What is this for?
bcwhite 2015/10/29 23:40:57 Padding. This structure must pad out to a multipl
+ char corrupted; // flag indicating that corruption has been detected
+ char full; // flag indicating that alloc failed because segment is full
+ char flags[2]; // align to next int (not strictly needed, but avoids confusion)
chrisha 2015/10/29 21:05:13 Use a bitfield for flags? (Easier to grow in the f
bcwhite 2015/10/29 23:40:57 Bitfields are read-modify-write which are race-con
chrisha 2015/10/30 14:36:46 The only thing that's atomic is a machine word wri
bcwhite 2015/10/30 15:15:35 That used to be the case but C++-11 changed that.
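A small illustration of the distinction argued above (the struct names are illustrative only, not from the patch): under the C++11 memory model, adjacent bit-fields occupy a single memory location, so setting one flag is a read-modify-write of the shared word, whereas separate non-bit-field members are distinct memory locations and concurrent writes to different members do not race.

    struct BitfieldFlags {
      // Both bits share one memory location; writing 'full' re-stores the
      // whole word, so a concurrent write to 'corrupted' can be lost.
      unsigned corrupted : 1;
      unsigned full : 1;
    };

    struct SeparateFlags {
      // Distinct char members are distinct memory locations in C++11, so
      // concurrent writes to different members do not conflict.
      char corrupted;
      char full;
    };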
+
+ // The "iterable" queue is an append-only M&S Queue, as described here:
+ // https://www.research.ibm.com/people/m/michael/podc-1996.pdf
+ subtle::Atomic32 tailptr; // last block available for iteration
+ BlockHeader queue; // empty block for linked-list head/tail (must be last)
+};
+
+// The "queue" block header is used to detect "last node" so that zero/null
+// can be used to indicate that it hasn't been added at all. It is part of
+// the SharedMetaData structure which itself is always located at offset zero.
+#define OFFSET_QUEUE offsetof(SharedMetaData, queue)
chrisha 2015/10/29 21:05:13 Use a constant here?
bcwhite 2015/10/29 23:40:57 A constant doesn't work because SharedMetaData is
chrisha 2015/10/30 14:36:46 sgtm
+
+SharedMemoryAllocator::SharedMemoryAllocator(void* base, int32 size,
+ int32 page)
chrisha 2015/10/29 21:05:13 This doesn't fit on the previous line?
bcwhite 2015/10/29 23:40:57 So it does. At one point I was trying uint32. (b
+ : shared_meta_(static_cast<SharedMetaData*>(base)),
+ mem_base_(static_cast<char*>(base)),
+ mem_size_(size),
+ mem_page_(page ? page : size),
+ last_seen_(0),
+ corrupted_(false) {
+ static_assert(sizeof(BlockHeader) % ALLOC_ALIGNMENT == 0,
chrisha 2015/10/29 21:05:13 Put these static asserts below the struct definiti
bcwhite 2015/10/29 23:40:57 They don't work there because the structures are p
+ "BlockHeader is not a multiple of ALLOC_ALIGNMENT");
+ static_assert(sizeof(SharedMetaData) % ALLOC_ALIGNMENT == 0,
+ "SharedMetaData is not a multiple of ALLOC_ALIGNMENT");
+
+ DCHECK(base && reinterpret_cast<uintptr_t>(base) % ALLOC_ALIGNMENT == 0);
+ DCHECK(size >= 1 << 10 && size <= 1 << 20 && // 1 KiB <= size <= 1 MiB
+ size % ALLOC_ALIGNMENT == 0);
+ DCHECK(page >= 0 && (page == 0 || size % page == 0));
+
+ if (shared_meta_->cookie != GLOBAL_COOKIE) {
+ // This block is only executed when a completely new memory segment is
+ // being initialized. It's unshared and single-threaded...
+ const BlockHeader* first_block = reinterpret_cast<BlockHeader*>(
+ mem_base_ + sizeof(SharedMetaData));
+ if (shared_meta_->cookie != 0 ||
+ shared_meta_->size != 0 ||
+ shared_meta_->version != 0 ||
+ shared_meta_->freeptr != 0 ||
+ shared_meta_->corrupted != 0 ||
+ shared_meta_->full != 0 ||
+ shared_meta_->tailptr != 0 ||
+ shared_meta_->queue.cookie != 0 ||
+ shared_meta_->queue.next != 0 ||
+ first_block->size != 0 ||
+ first_block->cookie != 0 ||
+ first_block->type != 0 ||
+ first_block->next != 0) {
+ // ...or something malicious has been playing with the meta-data.
chrisha 2015/10/29 21:05:13 ubernit: metadata is one word
bcwhite 2015/10/29 23:40:57 Done. Here and elsewhere.
+ SetCorrupted();
+ }
+
+ // This is still safe to do even if corruption has been detected.
+ shared_meta_->cookie = GLOBAL_COOKIE;
+ shared_meta_->size = size;
+ shared_meta_->version = GLOBAL_VERSION;
+ subtle::NoBarrier_Store(&shared_meta_->freeptr, sizeof(SharedMetaData));
+
+ // Set up the queue of iterable allocations.
+ shared_meta_->queue.size = sizeof(BlockHeader);
+ shared_meta_->queue.cookie = BLOCK_COOKIE_QUEUE;
+ subtle::NoBarrier_Store(&shared_meta_->queue.next, OFFSET_QUEUE);
+ subtle::NoBarrier_Store(&shared_meta_->tailptr, OFFSET_QUEUE);
+ }
+}
+
+SharedMemoryAllocator::~SharedMemoryAllocator() {
+}
+
+int32 SharedMemoryAllocator::Allocate(int32 size, int32 type) {
+ if (size < 0) {
+ NOTREACHED();
+ return OFFSET_NULL;
+ }
+
+ // Round up the requested size, plus header, to the next allocation alignment.
+ size += sizeof(BlockHeader);
+ size = (size + (ALLOC_ALIGNMENT - 1)) & ~(ALLOC_ALIGNMENT - 1);
+ if (size > mem_page_)
+ return OFFSET_NULL;
+
+ // Allocation is lockless, so we do all our calculation and then, if the
+ // compare-and-swap of the free pointer indicates that something else changed
+ // the state since we started, scrap everything and start over.
+ for (;;) {
+ if (IsCorrupted())
+ return OFFSET_NULL;
+
+ int32 freeptr = subtle::Acquire_Load(&shared_meta_->freeptr);
+ if (freeptr + size > mem_size_) {
+ shared_meta_->full = true;
+ return OFFSET_NULL;
+ }
+ BlockHeader* block = GetBlock(freeptr, 0, 0, true);
+ if (!block) {
+ SetCorrupted();
+ return OFFSET_NULL;
+ }
+
+ // An allocation cannot cross page boundaries. If it would, create a
+ // "wasted" block and begin again at the top of the next page.
+ int32 page_free = mem_page_ - freeptr % mem_page_;
+ if (size > page_free) {
+ int32 new_freeptr = freeptr + page_free;
+ if (subtle::Release_CompareAndSwap(
+ &shared_meta_->freeptr, freeptr, new_freeptr) == freeptr) {
+ block->size = page_free;
+ block->cookie = BLOCK_COOKIE_WASTED;
+ }
+ continue;
+ }
+
+ // Don't leave a slice at the end of a page too small for anything.
+ if (page_free - size < sizeof(BlockHeader) + ALLOC_ALIGNMENT)
+ size = page_free;
+
+ int32 new_freeptr = freeptr + size;
+ if (new_freeptr > mem_size_) {
+ SetCorrupted();
+ return OFFSET_NULL;
+ }
+
+ if (subtle::Release_CompareAndSwap(
+ &shared_meta_->freeptr, freeptr, new_freeptr) != freeptr) {
+ // Another thread must have completed an allocation while we were working.
+ // Try again.
+ continue;
+ }
+
+ // Since allocating a block is atomic and all unallocated memory must be
+ // zero, any other value indicates that something has run amok.
+ if (block->size != 0 ||
+ block->cookie != BLOCK_COOKIE_FREE ||
+ block->type != 0 ||
+ subtle::NoBarrier_Load(&block->next) != 0) {
+ SetCorrupted();
+ return OFFSET_NULL;
+ }
+
+ block->size = size;
+ block->cookie = BLOCK_COOKIE_ALLOCATED;
+ block->type = type;
+ return freeptr;
+ }
+}
+
+void SharedMemoryAllocator::GetMemoryInfo(MemoryInfo* meminfo) {
+ int32 remaining = mem_size_ - subtle::NoBarrier_Load(&shared_meta_->freeptr);
+ meminfo->total = mem_size_;
+ meminfo->free = shared_meta_->corrupted ? 0 : remaining - sizeof(BlockHeader);
+}
+
+void SharedMemoryAllocator::MakeIterable(int32 offset) {
+ if (IsCorrupted())
+ return;
+ BlockHeader* block = GetBlock(offset, 0, 0, false);
+ if (!block) // invalid offset
+ return;
+ if (subtle::NoBarrier_Load(&block->next) != 0) // previously set iterable
+ return;
+ subtle::NoBarrier_Store(&block->next, OFFSET_QUEUE); // will be tail block
+
+ // Try to add this block to the tail of the queue. May take multiple tries.
+ int32 tail;
+ for (;;) {
+ tail = subtle::Acquire_Load(&shared_meta_->tailptr);
+ block = GetBlock(tail, 0, 0, true);
+ if (!block) {
+ SetCorrupted();
+ return;
+ }
+ int32 next = subtle::NoBarrier_Load(&block->next);
+
+ // Ensure that the tail pointer didn't change while reading next.
+ if (tail == subtle::Release_Load(&shared_meta_->tailptr)) {
+ // Check if the found block is truly the last in the queue (i.e. it
+ // points back to the "queue" node).
+ if (next == OFFSET_QUEUE) {
+ // Yes. Try to append the passed block after the current tail block.
+ if (subtle::Release_CompareAndSwap(
+ &block->next, OFFSET_QUEUE, offset) == OFFSET_QUEUE) {
+ // Success! The block is enqueued; need to update the tail pointer.
+ break;
+ }
+ } else {
+ // No. Another thread has stalled between updating the block's
+ // "next" pointer and updating the tail pointer. Try to advance
+ // tailptr past the found block ourselves; that other thread may
+ // finish first or it may have crashed. Be fail-safe.
+ subtle::Release_CompareAndSwap(&shared_meta_->tailptr, tail, next);
+ }
+ }
+ }
+
+ // Block has been enqueued. Now update the tail-pointer past it. This
+ // could fail if another thread has already completed the operation as
+ // part of being fail-safe.
+ subtle::Release_CompareAndSwap(&shared_meta_->tailptr, tail, offset);
+}
+
+int32 SharedMemoryAllocator::GetFirstIterable(Iterator* state, int32* type) {
+ state->last = OFFSET_QUEUE;
+ return GetNextIterable(state, type);
+}
+
+int32 SharedMemoryAllocator::GetNextIterable(Iterator* state, int32* type) {
+ const BlockHeader* block = GetBlock(state->last, 0, 0, true);
+ if (!block) // invalid iterator state
+ return OFFSET_NULL;
+ int32 next = subtle::NoBarrier_Load(&block->next);
+ block = GetBlock(next, 0, 0, false);
+ if (!block) // no next allocation in queue
+ return OFFSET_NULL;
+
+ state->last = next;
+ *type = block->type;
+ return next;
+}
+
+void SharedMemoryAllocator::SetCorrupted() {
+ LOG(ERROR) << "Corruption detected in shared-memory segment.";
+ corrupted_ = true;
+ shared_meta_->corrupted = true;
+}
+
+bool SharedMemoryAllocator::IsCorrupted() {
+ if (corrupted_ || shared_meta_->corrupted) {
+ SetCorrupted(); // Make sure all indicators are set.
+ return true;
+ }
+ return false;
+}
+
+bool SharedMemoryAllocator::IsFull() {
+ return shared_meta_->full != 0;
+}
+
+SharedMemoryAllocator::BlockHeader* SharedMemoryAllocator::GetBlock(
+ int32 offset, int32 type, int32 size, bool special) {
+ // Validation of parameters.
+ if (offset % ALLOC_ALIGNMENT != 0)
+ return nullptr;
+ if (offset < (int)(special ? OFFSET_QUEUE : sizeof(SharedMetaData)))
+ return nullptr;
+ size += sizeof(BlockHeader);
+ if (offset + size > mem_size_)
+ return nullptr;
+ int32 freeptr = subtle::NoBarrier_Load(&shared_meta_->freeptr);
+ if (offset + size > freeptr + (int)(special ? sizeof(BlockHeader) : 0))
+ return nullptr;
+
+ // Validation of referenced block-header.
+ const BlockHeader* block = reinterpret_cast<BlockHeader*>(mem_base_ + offset);
+ if (offset != freeptr && block->size < size)
+ return nullptr;
+ if (!special && block->cookie != BLOCK_COOKIE_ALLOCATED)
+ return nullptr;
+ if (type != 0 && block->type != type)
+ return nullptr;
+
+ // Return pointer to block data.
+ return reinterpret_cast<BlockHeader*>(mem_base_ + offset);
+}
+
+void* SharedMemoryAllocator::GetBlockData(int32 offset, int32 type,
+ int32 size, bool special) {
+ DCHECK(size > 0);
+ BlockHeader* block = GetBlock(offset, type, size, special);
+ if (!block)
+ return nullptr;
+ return reinterpret_cast<char*>(block) + sizeof(BlockHeader);
+}
+
+} // namespace base
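Since the class header is not part of this diff, the following is a hedged usage sketch of the API as it appears in this file; the default state of Iterator, the kMyDataType constant, and how the caller fills in its allocation are assumptions for illustration, not taken from the change itself.

    // 'mem' is assumed to be a zeroed, ALLOC_ALIGNMENT-aligned shared region.
    base::SharedMemoryAllocator allocator(mem, 1 << 16, 0);  // 64 KiB, no paging

    const int32 kMyDataType = 42;  // caller-chosen type code (hypothetical)
    int32 offset = allocator.Allocate(24, kMyDataType);
    if (offset != 0) {  // 0 is the null offset
      // ... initialize the 24 bytes behind 'offset' ...
      allocator.MakeIterable(offset);  // publish it to other users of the segment
    }

    // Walk everything that has been made iterable, in insertion order.
    base::SharedMemoryAllocator::Iterator iter;
    int32 type = 0;
    for (int32 off = allocator.GetFirstIterable(&iter, &type); off != 0;
         off = allocator.GetNextIterable(&iter, &type)) {
      if (type == kMyDataType) {
        // Interpret the allocation at 'off' as the caller-defined structure.
      }
    }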
