Chromium Code Reviews
Issue 1410213004: Create "persistent memory allocator" for persisting and sharing objects.  (Closed)
Base URL: https://chromium.googlesource.com/chromium/src.git@master

| Index: base/memory/shared_memory_allocator.cc | 
| diff --git a/base/memory/shared_memory_allocator.cc b/base/memory/shared_memory_allocator.cc | 
| new file mode 100644 | 
| index 0000000000000000000000000000000000000000..6f7f5e86406e038cabded7c0cdbbf653db2e1578 | 
| --- /dev/null | 
| +++ b/base/memory/shared_memory_allocator.cc | 
| @@ -0,0 +1,328 @@ | 
| +// Copyright (c) 2015 The Chromium Authors. All rights reserved. | 
| +// Use of this source code is governed by a BSD-style license that can be | 
| +// found in the LICENSE file. | 
| + | 
| +#include "base/memory/shared_memory_allocator.h" | 
| + | 
| +#include <assert.h> | 
| + | 
| +#include "base/atomicops.h" | 
| +#include "base/logging.h" | 
| + | 
| +// All allocations and data-structures must be aligned to this byte boundary. | 
| +#define ALLOC_ALIGNMENT 16 | 
| + | 
| +// The equivalent of NULL for an offset. | 
| +#define OFFSET_NULL 0 | 
| + | 
| +namespace { | 
| + | 
| +// A constant (random) value placed in the shared meta-data to identify | 
| +// an already initialized memory segment. | 
| +const int32 GLOBAL_COOKIE = 0x408305DC; | 
| + | 
| +// The current version of the meta-data. If updates are made that change | 
| +// the meta-data, the version number can be queried to operate in a backward- | 
| +// compatible manner until the memory segment is completely re-initialized. | 
| +const int32 GLOBAL_VERSION = 1; | 
| + | 
| +// Constant values placed in the block headers to indicate a block's state. | 
| +const int32 BLOCK_COOKIE_FREE = 0; | 
| +const int32 BLOCK_COOKIE_QUEUE = 1; | 
| +const int32 BLOCK_COOKIE_WASTED = -1; | 
| +const int32 BLOCK_COOKIE_ALLOCATED = 0xC8799269; | 
| + | 
| +} // namespace | 
| + | 
| +namespace base { | 
| + | 
| +struct SharedMemoryAllocator::BlockHeader { | 
| + int32 size; // number of bytes in this block, including header | 
| + int32 cookie; // constant value indicating completed allocation | 
| + int32 type; // a number provided by caller indicating data type | 
| + subtle::Atomic32 next; // pointer to the next block when iterating | 
| +}; | 
| + | 
| +struct SharedMemoryAllocator::SharedMetaData { | 
| + int32 cookie; // some value that indicates complete initialization | 
| + int32 size; // total size of memory segment | 
| + int32 version; // version code so upgrades don't break | 
| + subtle::Atomic32 freeptr; // offset to first free space in the segment | 
| + int32 reserved[2]; | 
| + char corrupted; // flag indicating that corruption has been detected | 
| + char full; // flag indicating that alloc failed because segment is full | 
| + char flags[2]; // align to next int (not strictly needed but avoids confusion) | 
| + | 
| + // The "iterable" queue is an M&S Queue as described here, append-only: | 
| + // https://www.research.ibm.com/people/m/michael/podc-1996.pdf | 
| + subtle::Atomic32 tailptr; // last block available for iteration | 
| + BlockHeader queue; // empty block for linked-list head/tail (must be last) | 
| +}; | 
| + | 
| +// The "queue" block header is used to detect "last node" so that zero/null | 
| +// can be used to indicate that it hasn't been added at all. It is part of | 
| +// the SharedMetaData structure which itself is always located at offset zero. | 
| +#define OFFSET_QUEUE offsetof(SharedMetaData, queue) | 
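| + | 
| +// Queue invariants maintained by MakeIterable(): tailptr refers to the | 
| +// last block made iterable, each iterable block's "next" refers to the | 
| +// block appended after it, and the tail block's "next" refers back to | 
| +// OFFSET_QUEUE. | 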
| + | 
| +SharedMemoryAllocator::SharedMemoryAllocator(void* base, int32 size, | 
| + int32 page) | 
| + : shared_meta_(static_cast<SharedMetaData*>(base)), | 
| + mem_base_(static_cast<char*>(base)), | 
| + mem_size_(size), | 
| + mem_page_(page ? page : size), | 
| + last_seen_(0), | 
| + corrupted_(false) { | 
| + static_assert(sizeof(BlockHeader) % ALLOC_ALIGNMENT == 0, | 
| + "BlockHeader is not a multiple of ALLOC_ALIGNMENT"); | 
| + static_assert(sizeof(SharedMetaData) % ALLOC_ALIGNMENT == 0, | 
| + "SharedMetaData is not a multiple of ALLOC_ALIGNMENT"); | 
| + | 
| + DCHECK(base && reinterpret_cast<uintptr_t>(base) % ALLOC_ALIGNMENT == 0); | 
| + DCHECK(size >= 1 << 10 && size <= 1 << 20 && // 1 KiB <= size <= 1 MiB | 
| + size % ALLOC_ALIGNMENT == 0); | 
| + DCHECK(page >= 0 && (page == 0 || size % page == 0)); | 
| + | 
| + if (shared_meta_->cookie != GLOBAL_COOKIE) { | 
| + // This block is only executed when a completely new memory segment is | 
| + // being initialized. It's unshared and single-threaded... | 
| + const BlockHeader* first_block = reinterpret_cast<BlockHeader*>( | 
| + mem_base_ + sizeof(SharedMetaData)); | 
| + if (shared_meta_->cookie != 0 || | 
| + shared_meta_->size != 0 || | 
| + shared_meta_->version != 0 || | 
| + shared_meta_->freeptr != 0 || | 
| + shared_meta_->corrupted != 0 || | 
| + shared_meta_->full != 0 || | 
| + shared_meta_->tailptr != 0 || | 
| + shared_meta_->queue.cookie != 0 || | 
| + shared_meta_->queue.next != 0 || | 
| + first_block->size != 0 || | 
| + first_block->cookie != 0 || | 
| + first_block->type != 0 || | 
| + first_block->next != 0) { | 
| + // ...or something malicious has been playing with the meta-data. | 
| + SetCorrupted(); | 
| + } | 
| + | 
| + // This is still safe to do even if corruption has been detected. | 
| + shared_meta_->cookie = GLOBAL_COOKIE; | 
| + shared_meta_->size = size; | 
| + shared_meta_->version = GLOBAL_VERSION; | 
| + subtle::NoBarrier_Store(&shared_meta_->freeptr, sizeof(SharedMetaData)); | 
| + | 
| + // Set up the queue of iterable allocations. | 
| + shared_meta_->queue.size = sizeof(BlockHeader); | 
| + shared_meta_->queue.cookie = BLOCK_COOKIE_QUEUE; | 
| + subtle::NoBarrier_Store(&shared_meta_->queue.next, OFFSET_QUEUE); | 
| + subtle::NoBarrier_Store(&shared_meta_->tailptr, OFFSET_QUEUE); | 
| + } | 
| +} | 
| + | 
| +SharedMemoryAllocator::~SharedMemoryAllocator() { | 
| +} | 
| + | 
| +int32 SharedMemoryAllocator::Allocate(int32 size, int32 type) { | 
| + if (size < 0) { | 
| + NOTREACHED(); | 
| + return OFFSET_NULL; | 
| + } | 
| + | 
| + // Round up the requested size, plus header, to the next allocation alignment. | 
| + size += sizeof(BlockHeader); | 
| + size = (size + (ALLOC_ALIGNMENT - 1)) & ~(ALLOC_ALIGNMENT - 1); | 
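| + // e.g. with ALLOC_ALIGNMENT 16, a 20-byte request grows to 36 bytes | 
| + // with the header and then rounds up to 48. | 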
| + if (size > mem_page_) | 
| + return OFFSET_NULL; | 
| + | 
| + // Allocation is lockless so we do all our calculation and then, if saving | 
| + // indicates a change has occurred since we started, scrap everything and | 
| + // start over. | 
| + for (;;) { | 
| + if (IsCorrupted()) | 
| + return OFFSET_NULL; | 
| + | 
| + int32 freeptr = subtle::Acquire_Load(&shared_meta_->freeptr); | 
| + if (freeptr + size > mem_size_) { | 
| + shared_meta_->full = true; | 
| + return OFFSET_NULL; | 
| + } | 
| + BlockHeader* block = GetBlock(freeptr, 0, 0, true); | 
| + if (!block) { | 
| + SetCorrupted(); | 
| + return OFFSET_NULL; | 
| + } | 
| + | 
| + // An allocation cannot cross page boundaries. If it would, create a | 
| + // "wasted" block and begin again at the top of the next page. | 
| + int32 page_free = mem_page_ - freeptr % mem_page_; | 
| + if (size > page_free) { | 
| + int32 new_freeptr = freeptr + page_free; | 
| + if (subtle::Release_CompareAndSwap( | 
| + &shared_meta_->freeptr, freeptr, new_freeptr) == freeptr) { | 
| + block->size = page_free; | 
| + block->cookie = BLOCK_COOKIE_WASTED; | 
| + } | 
| + continue; | 
| + } | 
| + | 
| + // Don't leave a slice at the end of a page too small for anything. | 
| + if (page_free - size < (int)(sizeof(BlockHeader) + ALLOC_ALIGNMENT)) | 
| + size = page_free; | 
| + | 
| + int32 new_freeptr = freeptr + size; | 
| + if (new_freeptr > mem_size_) { | 
| + SetCorrupted(); | 
| + return OFFSET_NULL; | 
| + } | 
| + | 
| + if (subtle::Release_CompareAndSwap( | 
| + &shared_meta_->freeptr, freeptr, new_freeptr) != freeptr) { | 
| + // Another thread must have completed an allocation while we were working. | 
| + // Try again. | 
| + continue; | 
| + } | 
| + | 
| + // Since allocating a block is atomic and all unallocated memory must be | 
| + // zero, any other value indicates that something has run amok. | 
| + if (block->size != 0 || | 
| + block->cookie != BLOCK_COOKIE_FREE || | 
| + block->type != 0 || | 
| + subtle::NoBarrier_Load(&block->next) != 0) { | 
| + SetCorrupted(); | 
| + return OFFSET_NULL; | 
| + } | 
| + | 
| + block->size = size; | 
| + block->cookie = BLOCK_COOKIE_ALLOCATED; | 
| + block->type = type; | 
| + return freeptr; | 
| + } | 
| +} | 
| + | 
| +void SharedMemoryAllocator::GetMemoryInfo(MemoryInfo* meminfo) { | 
| + int32 remaining = mem_size_ - subtle::NoBarrier_Load(&shared_meta_->freeptr); | 
| + meminfo->total = mem_size_; | 
| + meminfo->free = shared_meta_->corrupted ? 0 : remaining - sizeof(BlockHeader); | 
| +} | 
| + | 
| +void SharedMemoryAllocator::MakeIterable(int32 offset) { | 
| + if (IsCorrupted()) | 
| + return; | 
| + BlockHeader* block = GetBlock(offset, 0, 0, false); | 
| + if (!block) // invalid offset | 
| + return; | 
| + if (subtle::NoBarrier_Load(&block->next) != 0) // previously set iterable | 
| + return; | 
| + subtle::NoBarrier_Store(&block->next, OFFSET_QUEUE); // will be tail block | 
| + | 
| + // Try to add this block to the tail of the queue. May take multiple tries. | 
| + int32 tail; | 
| + for (;;) { | 
| + tail = subtle::Acquire_Load(&shared_meta_->tailptr); | 
| + block = GetBlock(tail, 0, 0, true); | 
| + if (!block) { | 
| + SetCorrupted(); | 
| + return; | 
| + } | 
| + int32 next = subtle::NoBarrier_Load(&block->next); | 
| + | 
| + // Ensure that the tail pointer didn't change while reading next. | 
| + if (tail == subtle::Release_Load(&shared_meta_->tailptr)) { | 
| + // Check if the found block is truly the last in the queue (i.e. it | 
| + // points back to the "queue" node). | 
| + if (next == OFFSET_QUEUE) { | 
| + // Yes. Try to append the passed block after the current tail block. | 
| + if (subtle::Release_CompareAndSwap( | 
| + &block->next, OFFSET_QUEUE, offset) == OFFSET_QUEUE) { | 
| + // Success! The block is enqueued; need to update the tail pointer. | 
| + break; | 
| + } | 
| + } else { | 
| + // No. Another thread has stopped between the block-next update | 
| + // and the tail-pointer update. Try to update tailptr past the | 
| + // found block. That other thread may complete it first or it | 
| + // may have crashed. Be fail-safe. | 
| + subtle::Release_CompareAndSwap(&shared_meta_->tailptr, tail, next); | 
| + } | 
| + } | 
| + } | 
| + | 
| + // Block has been enqueued. Now update the tail-pointer past it. This | 
| + // could fail if another thread has already completed the operation as | 
| + // part of being fail-safe. | 
| + subtle::Release_CompareAndSwap(&shared_meta_->tailptr, tail, offset); | 
| +} | 
| + | 
| +int32 SharedMemoryAllocator::GetFirstIterable(Iterator* state, int32* type) { | 
| + state->last = OFFSET_QUEUE; | 
| + return GetNextIterable(state, type); | 
| +} | 
| + | 
| +int32 SharedMemoryAllocator::GetNextIterable(Iterator* state, int32* type) { | 
| + const BlockHeader* block = GetBlock(state->last, 0, 0, true); | 
| + if (!block) // invalid iterator state | 
| + return OFFSET_NULL; | 
| + int32 next = subtle::NoBarrier_Load(&block->next); | 
| + block = GetBlock(next, 0, 0, false); | 
| + if (!block) // no next allocation in queue | 
| + return OFFSET_NULL; | 
| + | 
| + state->last = next; | 
| + *type = block->type; | 
| + return next; | 
| +} | 
| + | 
| +void SharedMemoryAllocator::SetCorrupted() { | 
| + LOG(ERROR) << "Corruption detected in shared-memory segment."; | 
| + corrupted_ = true; | 

Alexander Potapenko (2015/10/30 06:53:13):
Can SetCorrupted() and IsCorrupted() be invoked concurrently?

bcwhite (2015/10/30 14:01:09):
It is, but not an important one. Correct operation…

| + shared_meta_->corrupted = true; | 
| +} | 
| + | 
| +bool SharedMemoryAllocator::IsCorrupted() { | 
| + if (corrupted_ || shared_meta_->corrupted) { | 
| + SetCorrupted(); // Make sure all indicators are set. | 
| + return true; | 
| + } | 
| + return false; | 
| +} | 
| + | 
| +bool SharedMemoryAllocator::IsFull() { | 
| + return shared_meta_->full != 0; | 
| +} | 
| + | 
| +SharedMemoryAllocator::BlockHeader* SharedMemoryAllocator::GetBlock( | 
| + int32 offset, int32 type, int32 size, bool special) { | 
| + // Validation of parameters. | 
| + if (offset % ALLOC_ALIGNMENT != 0) | 
| + return nullptr; | 
| + if (offset < (int)(special ? OFFSET_QUEUE : sizeof(SharedMetaData))) | 
| + return nullptr; | 
| + size += sizeof(BlockHeader); | 
| + if (offset + size > mem_size_) | 
| + return nullptr; | 
| + int32 freeptr = subtle::NoBarrier_Load(&shared_meta_->freeptr); | 
| + if (offset + size > freeptr + (int)(special ? sizeof(BlockHeader) : 0)) | 
| + return nullptr; | 
| + | 
| + // Validation of referenced block-header. | 
| + const BlockHeader* block = reinterpret_cast<BlockHeader*>(mem_base_ + offset); | 
| + if (offset != freeptr && block->size < size) | 
| + return nullptr; | 
| + if (!special && block->cookie != BLOCK_COOKIE_ALLOCATED) | 
| + return nullptr; | 
| + if (type != 0 && block->type != type) | 
| + return nullptr; | 
| + | 
| + // Return pointer to block data. | 
| + return reinterpret_cast<BlockHeader*>(mem_base_ + offset); | 
| +} | 
| + | 
| +void* SharedMemoryAllocator::GetBlockData(int32 offset, int32 type, | 
| + int32 size, bool special) { | 
| + DCHECK(size > 0); | 
| + BlockHeader* block = GetBlock(offset, type, size, special); | 
| + if (!block) | 
| + return nullptr; | 
| + return reinterpret_cast<char*>(block) + sizeof(BlockHeader); | 
| +} | 
| + | 
| +} // namespace base |
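
For reviewers who want to see the API in context, here is a minimal usage sketch. It is illustrative only and not part of the CL: it assumes the accompanying header (base/memory/shared_memory_allocator.h, not shown in this diff) publicly declares the constructor, Allocate(), MakeIterable(), GetFirstIterable(), GetNextIterable(), and GetBlockData() with the signatures used above, and that int32 is available via the usual base typedefs.

// Usage sketch (assumptions noted above; Example() and |buffer| are
// hypothetical names, not part of the CL).
#include "base/memory/shared_memory_allocator.h"

void Example() {
  // A zeroed, 16-byte-aligned segment; 64 KiB satisfies the constructor's
  // DCHECKs (1 KiB <= size <= 1 MiB, a multiple of the alignment).
  alignas(16) static char buffer[64 << 10] = {};
  base::SharedMemoryAllocator allocator(buffer, sizeof(buffer), 0);

  // Allocate a block with a caller-defined type id; 0 (OFFSET_NULL) means
  // the allocation failed.
  int32 offset = allocator.Allocate(24, 42);
  if (!offset)
    return;

  // Publish the block so other users of the segment can find it.
  allocator.MakeIterable(offset);

  // Walk every iterable allocation in the segment.
  base::SharedMemoryAllocator::Iterator iter;
  int32 type;
  for (int32 it = allocator.GetFirstIterable(&iter, &type); it != 0;
       it = allocator.GetNextIterable(&iter, &type)) {
    void* data = allocator.GetBlockData(it, type, 24, false);
    if (data) {
      // ... read or write the 24 bytes of block data ...
    }
  }
}

GetBlockData() is used here as sketched in this file; if the header keeps it private, the data pointer would instead come from whatever accessor the header exposes.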