| Index: base/memory/shared_memory_allocator.cc |
| diff --git a/base/memory/shared_memory_allocator.cc b/base/memory/shared_memory_allocator.cc |
| new file mode 100644 |
| index 0000000000000000000000000000000000000000..f1f55bfebf544b4e8f69dd41e2712bb92bb5b1b7 |
| --- /dev/null |
| +++ b/base/memory/shared_memory_allocator.cc |
| @@ -0,0 +1,417 @@ |
| +// Copyright (c) 2015 The Chromium Authors. All rights reserved. |
| +// Use of this source code is governed by a BSD-style license that can be |
| +// found in the LICENSE file. |
| + |
| +#include "base/memory/shared_memory_allocator.h" |
| + |
| +#include <assert.h> |
| + |
| +#include "base/logging.h" |
| + |
| +// All integer constants in this file are signed because Atomic32 is signed, |
| +// and keeping all other values consistent with that avoids a lot of the |
| +// casting that would otherwise be needed just to silence signed/unsigned |
| +// compiler warnings. This means an occasional cast of a sizeof() constant |
| +// to "int", but that is far simpler than the alternative. |
| + |
| +namespace { |
| + |
| +// All allocations and data-structures must be aligned to this byte boundary. |
| +// It shouldn't be less than 8 so that 64-bit values can be read in a single |
|
Dmitry Vyukov
2015/11/03 14:06:46
What architecture do you have in mind? And what ab
bcwhite
2015/11/03 16:28:20
Comment expanded and updated. I don't have a part
|
| +// RAM bus access. 16 can be used so that the block header would always fall |
| +// within a single cache line. |
| +const int32_t kAllocAlignment = 8; |
| + |
| +// A constant (random) value placed in the shared metadata to identify |
| +// an already initialized memory segment. |
| +const int32_t kGlobalCookie = 0x408305DC; |
| + |
| +// The current version of the metadata. If updates are made that change |
| +// the metadata, the version number can be queried to operate in a backward- |
| +// compatible manner until the memory segment is completely re-initialized. |
| +const int32_t kGlobalVersion = 1; |
| + |
| +// Constant values placed in the block headers to indicate its state. |
| +const int32_t kBlockCookieFree = 0; |
| +const int32_t kBlockCookieQueue = 1; |
| +const int32_t kBlockCookieWasted = -1; |
| +const int32_t kBlockCookieAllocated = 0xC8799269; |
| + |
| +// TODO(bcwhite): When acceptable, consider moving flags to std::atomic<char> |
| +// types rather than a combined bitfield. |
| + |
| +enum { |
| + kFlagCorrupted, |
| + kFlagFull |
| +}; |
| + |
| +bool CheckFlag(base::subtle::Atomic32* flags, int flag) { |
| + base::subtle::Atomic32 loaded_flags = base::subtle::NoBarrier_Load(flags); |
|
Alexander Potapenko
2015/11/03 08:12:54
It's not immediately evident whether the currently
bcwhite
2015/11/03 16:28:19
Done.
|
| +  return (loaded_flags & (1 << flag)) != 0; |
| +} |
| + |
| +void SetFlag(base::subtle::Atomic32* flags, int flag, bool set) { |
|
Dmitry Vyukov
2015/11/03 14:06:45
You never pass set=false. Please delete it. This c
bcwhite
2015/11/03 16:28:20
Done.
|
| + for (;;) { |
| + base::subtle::Atomic32 loaded_flags = base::subtle::NoBarrier_Load(flags); |
| + base::subtle::Atomic32 new_flags = |
| +        (loaded_flags & ~(1 << flag)) | ((set ? 1 : 0) << flag); |
| + if (base::subtle::Release_CompareAndSwap( |
|
Dmitry Vyukov
2015/11/03 14:06:45
You use NoBarrier_Load to load flags, so Release_C
bcwhite
2015/11/03 16:28:20
Done.
|
| + flags, loaded_flags, new_flags) == loaded_flags) { |
| + break; |
| + } |
| + } |
| +} |
| + |
| +} // namespace |
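As an aside on the TODO above: with std::atomic available, the flag helpers would not need a hand-rolled CAS loop at all. A minimal sketch of that alternative (hypothetical, not part of this patch; assumes C++11 <atomic>):

    #include <atomic>
    #include <stdint.h>

    // Hypothetical std::atomic versions of the helpers above. fetch_or does
    // the read-modify-write atomically, so no compare-and-swap retry loop is
    // needed to set a bit.
    void SetFlag(std::atomic<int32_t>* flags, int flag) {
      flags->fetch_or(1 << flag, std::memory_order_relaxed);
    }

    bool CheckFlag(const std::atomic<int32_t>* flags, int flag) {
      return (flags->load(std::memory_order_relaxed) & (1 << flag)) != 0;
    }

memory_order_relaxed mirrors the NoBarrier loads used above; whether relaxed ordering is strong enough for the stores is exactly the question raised in the review comments.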
| + |
| +namespace base { |
| + |
| +// The block-header is placed at the top of every allocation within the |
| +// segment to describe the data that follows it. |
| +struct SharedMemoryAllocator::BlockHeader { |
| + int32_t size; // Number of bytes in this block, including header. |
| + int32_t cookie; // Constant value indicating completed allocation. |
| + int32_t type; // A number provided by caller indicating data type. |
| + subtle::Atomic32 next; // Pointer to the next block when iterating |
|
Dmitry Vyukov
2015/11/03 14:06:45
add . at the end of comment for consistency
bcwhite
2015/11/03 16:28:19
Done.
|
| +}; |
| + |
| +// The shared metadata exists once at the top of the memory segment to |
| +// describe the state of the allocator to all processes. |
| +struct SharedMemoryAllocator::SharedMetadata { |
| + int32_t cookie; // Some value that indicates complete initialization. |
| + int32_t size; // Total size of memory segment. |
| + int32_t page_size; // Paging size within memory segment. |
| + int32_t version; // Version code so upgrades don't break. |
| + subtle::Atomic32 freeptr; // Offset to first free space in the segment. |
| + subtle::Atomic32 flags; // Bitfield of information flags. |
| + int32_t reserved; // Padding to ensure size is multiple of alignment. |
| + |
| + // The "iterable" queue is an M&S Queue as described here, append-only: |
| + // https://www.research.ibm.com/people/m/michael/podc-1996.pdf |
| + subtle::Atomic32 tailptr; // Last block available for iteration. |
| + BlockHeader queue; // Empty block for linked-list head/tail. (must be last) |
| +}; |
| + |
| +// The "queue" block header is used to detect "last node" so that zero/null |
| +// can be used to indicate that it hasn't been added at all. It is part of |
| +// the SharedMetadata structure which itself is always located at offset zero. |
| +// This can't be a constant because SharedMetadata is a private definition. |
| +#define OFFSET_QUEUE offsetof(SharedMetadata, queue) |
| +#define OFFSET_NULL 0 // the equivalent of NULL for an offset |
| + |
| +SharedMemoryAllocator::SharedMemoryAllocator(void* base, int32_t size, |
| + int32_t page_size) |
| + : shared_meta_(static_cast<SharedMetadata*>(base)), |
| + mem_base_(static_cast<char*>(base)), |
| + mem_size_(size), |
| + mem_page_(page_size ? page_size : size), |
| + last_seen_(0), |
| + corrupted_(0) { |
| + static_assert(sizeof(BlockHeader) % kAllocAlignment == 0, |
| + "BlockHeader is not a multiple of kAllocAlignment"); |
| + static_assert(sizeof(SharedMetadata) % kAllocAlignment == 0, |
| + "SharedMetadata is not a multiple of kAllocAlignment"); |
| + |
| + DCHECK(base && reinterpret_cast<uintptr_t>(base) % kAllocAlignment == 0); |
|
Alexander Potapenko
2015/11/03 08:12:54
These invariants shouldn't be checked often, shoul
bcwhite
2015/11/03 16:28:19
Done.
|
| + DCHECK(size >= 1 << 10 && size <= 1 << 20 && // 1 KiB <= size <= 1 MiB |
| + size % kAllocAlignment == 0); |
| + DCHECK(page_size >= 0 && (page_size == 0 || size % page_size == 0)); |
| + |
| + if (shared_meta_->cookie != kGlobalCookie) { |
| + // This block is only executed when a completely new memory segment is |
| + // being initialized. It's unshared and single-threaded... |
| + const BlockHeader* first_block = reinterpret_cast<BlockHeader*>( |
| + mem_base_ + sizeof(SharedMetadata)); |
| + if (shared_meta_->cookie != 0 || |
| + shared_meta_->size != 0 || |
| + shared_meta_->version != 0 || |
| + subtle::NoBarrier_Load(&shared_meta_->freeptr) != 0 || |
| + subtle::NoBarrier_Load(&shared_meta_->flags) != 0 || |
| + shared_meta_->tailptr != 0 || |
| + shared_meta_->queue.cookie != 0 || |
| + subtle::NoBarrier_Load(&shared_meta_->queue.next) != 0 || |
| + first_block->size != 0 || |
| + first_block->cookie != 0 || |
| + first_block->type != 0 || |
| + first_block->next != 0) { |
| + // ...or something malicious has been playing with the metadata. |
| + NOTREACHED(); |
| + SetCorrupted(); |
| + } |
| + |
| + // This is still safe to do even if corruption has been detected. |
| + shared_meta_->cookie = kGlobalCookie; |
| + shared_meta_->size = size; |
| + shared_meta_->page_size = page_size; |
| + shared_meta_->version = kGlobalVersion; |
| + subtle::NoBarrier_Store(&shared_meta_->freeptr, sizeof(SharedMetadata)); |
| + |
| + // Set up the queue of iterable allocations. |
| + shared_meta_->queue.size = sizeof(BlockHeader); |
| + shared_meta_->queue.cookie = kBlockCookieQueue; |
| + subtle::NoBarrier_Store(&shared_meta_->queue.next, OFFSET_QUEUE); |
| + subtle::NoBarrier_Store(&shared_meta_->tailptr, OFFSET_QUEUE); |
| + } else { |
| + // The allocator is attaching to a previously initialized segment of |
| + // memory. Make sure the embedded data matches what has been passed. |
| + if (shared_meta_->size != size || |
| + shared_meta_->page_size != page_size) { |
| + NOTREACHED(); |
| + SetCorrupted(); |
| + } |
| + } |
| +} |
| + |
| +SharedMemoryAllocator::~SharedMemoryAllocator() { |
| +} |
| + |
| +int32_t SharedMemoryAllocator::Allocate(int32_t size, int32_t type) { |
| + if (size < 0) { |
| + NOTREACHED(); |
| + return OFFSET_NULL; |
| + } |
| + |
| + // Round up the requested size, plus header, to the next allocation alignment. |
| + size += sizeof(BlockHeader); |
| + size = (size + (kAllocAlignment - 1)) & ~(kAllocAlignment - 1); |
| + if (size > mem_page_) |
| + return OFFSET_NULL; |
| + |
| +  // Allocation is lockless so we do all our calculation and then, if saving |
| + // indicates a change has occurred since we started, scrap everything and |
| + // start over. |
| + for (;;) { |
| + if (IsCorrupted()) |
| + return OFFSET_NULL; |
| + |
| + int32_t freeptr = subtle::Acquire_Load(&shared_meta_->freeptr); |
| + if (freeptr + size > mem_size_) { |
| + SetFlag(&shared_meta_->flags, kFlagFull, true); |
| + return OFFSET_NULL; |
| + } |
| + |
| + // Get pointer to the "free" block. It doesn't even have a header; pass |
| +    // -sizeof(header) so accounting for that will yield an expected size of |
| + // zero which is what will be stored at that location. If something |
| + // has been allocated since the load of freeptr above, it is still safe |
| + // as nothing will be written to that location until after the CAS below. |
| + BlockHeader* block = GetBlock(freeptr, 0, -(int)sizeof(BlockHeader), true); |
| + if (!block) { |
| + SetCorrupted(); |
| + return OFFSET_NULL; |
| + } |
| + |
| + // An allocation cannot cross page boundaries. If it would, create a |
| + // "wasted" block and begin again at the top of the next page. |
| + int32_t page_free = mem_page_ - freeptr % mem_page_; |
| + if (size > page_free) { |
| + int32_t new_freeptr = freeptr + page_free; |
| + if (subtle::Release_CompareAndSwap( |
| + &shared_meta_->freeptr, freeptr, new_freeptr) == freeptr) { |
| + block->size = page_free; |
| + block->cookie = kBlockCookieWasted; |
| + } |
| + continue; |
| + } |
| + |
| + // Don't leave a slice at the end of a page too small for anything. This |
| + // can result in an allocation up to two alignment-sizes greater than the |
| + // minimum required by requested-size + header + alignment. |
| + if (page_free - size < (int)(sizeof(BlockHeader) + kAllocAlignment)) |
| + size = page_free; |
| + |
| + int32_t new_freeptr = freeptr + size; |
| + if (new_freeptr > mem_size_) { |
| + SetCorrupted(); |
| + return OFFSET_NULL; |
| + } |
| + |
| + if (subtle::Release_CompareAndSwap( |
| + &shared_meta_->freeptr, freeptr, new_freeptr) != freeptr) { |
| + // Another thread must have completed an allocation while we were working. |
| + // Try again. |
| + continue; |
| + } |
| + |
| + // Given that all memory was zeroed before ever being given to an instance |
| +    // of this class and given that we only allocate in a monotonic fashion |
| +    // going forward, it must be that the newly allocated block is completely |
| +    // full of zeros. If we find anything in the block header that is NOT a |
| +    // zero then something must have previously run amok through memory, |
| + // writing beyond the allocated space and into unallocated space. |
| + if (block->size != 0 || |
| + block->cookie != kBlockCookieFree || |
| + block->type != 0 || |
| + subtle::NoBarrier_Load(&block->next) != 0) { |
| + SetCorrupted(); |
| + return OFFSET_NULL; |
| + } |
| + |
| + block->size = size; |
| + block->cookie = kBlockCookieAllocated; |
| + block->type = type; |
| + return freeptr; |
| + } |
| +} |
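Aside: the expression (size + (kAllocAlignment - 1)) & ~(kAllocAlignment - 1) in Allocate is the standard round-up trick for power-of-two alignments. A tiny standalone check (hypothetical helper, for illustration only):

    #include <assert.h>
    #include <stdint.h>

    // Round n up to the next multiple of align; align must be a power of two.
    int32_t RoundUp(int32_t n, int32_t align) {
      return (n + (align - 1)) & ~(align - 1);
    }

    int main() {
      assert(RoundUp(17, 8) == 24);  // e.g. a 1-byte request + 16-byte header
      assert(RoundUp(24, 8) == 24);  // already-aligned values are unchanged
      assert(RoundUp(25, 8) == 32);
      return 0;
    }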
| + |
| +void SharedMemoryAllocator::GetMemoryInfo(MemoryInfo* meminfo) { |
| + int32_t remaining = |
| + mem_size_ - subtle::NoBarrier_Load(&shared_meta_->freeptr); |
| + meminfo->total = mem_size_; |
| + meminfo->free = IsCorrupted() ? 0 : remaining - sizeof(BlockHeader); |
| +} |
| + |
| +void SharedMemoryAllocator::MakeIterable(int32_t offset) { |
| + if (IsCorrupted()) |
| + return; |
| + BlockHeader* block = GetBlock(offset, 0, 0, false); |
| + if (!block) // invalid offset |
| + return; |
| + if (subtle::NoBarrier_Load(&block->next) != 0) // previously set iterable |
| + return; |
| + subtle::NoBarrier_Store(&block->next, OFFSET_QUEUE); // will be tail block |
|
Dmitry Vyukov
2015/11/03 14:06:46
Why don't you use a lock-free stack?
The stack alg
bcwhite
2015/11/03 16:28:19
Honestly, because an M&S Queue was what Alexander
Alexander Potapenko
2015/11/03 17:25:15
Was that another Alexander? I never mentioned M&S
Dmitry Vyukov
2015/11/03 18:15:07
Makes sense.
I guess we could do something along
|
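For context on the lock-free stack raised in the thread above: a Treiber stack needs only a single CAS on one head pointer per push, considerably simpler than the M&S enqueue that follows, at the cost of LIFO rather than FIFO iteration order. A rough sketch in the same offset-based style (hypothetical; a "head" field would take the place of "tailptr" in SharedMetadata, and the struct below merely mirrors the allocator's private BlockHeader so the sketch is self-contained):

    #include <stdint.h>

    #include "base/atomicops.h"

    struct StackBlockHeader {  // same layout as the allocator's BlockHeader
      int32_t size;
      int32_t cookie;
      int32_t type;
      base::subtle::Atomic32 next;
    };

    // Hypothetical push of a block offset onto a Treiber stack.
    void PushIterable(base::subtle::Atomic32* head,
                      StackBlockHeader* block,
                      int32_t block_offset) {
      for (;;) {
        base::subtle::Atomic32 old_head = base::subtle::Acquire_Load(head);
        // Link the new block in front of the current head, then publish it
        // with a single compare-and-swap on the head pointer.
        base::subtle::NoBarrier_Store(&block->next, old_head);
        if (base::subtle::Release_CompareAndSwap(head, old_head,
                                                 block_offset) == old_head) {
          return;  // Published; iteration now starts at this block.
        }
        // Another thread pushed first; reload the head and retry.
      }
    }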
| + |
| + // Try to add this block to the tail of the queue. May take multiple tries. |
| + int32_t tail; |
| + for (;;) { |
| + tail = subtle::Acquire_Load(&shared_meta_->tailptr); |
| + block = GetBlock(tail, 0, 0, true); |
| + if (!block) { |
| + SetCorrupted(); |
| + return; |
| + } |
| + int32_t next = subtle::NoBarrier_Load(&block->next); |
| + |
| + // Ensure that the tail pointer didn't change while reading next. Only |
| + // the read of the tail pointer is atomic but we need to read both the |
| + // tail pointer and the next pointer from it in an atomic fashion. The |
| + // way to do this is to read both non-atomically and then verify after |
| + // the second read that the first read is still valid/unchanged. |
| + if (tail == subtle::Release_Load(&shared_meta_->tailptr)) { |
| +      // Check if the found block is truly the last in the queue (i.e. it |
| + // points back to the "queue" node). |
| + if (next == OFFSET_QUEUE) { |
| + // Yes. Try to append the passed block after the current tail block. |
| + if (subtle::Release_CompareAndSwap( |
| + &block->next, OFFSET_QUEUE, offset) == OFFSET_QUEUE) { |
| + // Success! The block is enqueued; need to update the tail pointer. |
| + break; |
| + } |
| + } else { |
| + // No. Another thread has stopped between the block-next update |
| + // and the tail-pointer update. Try to update tailptr past the |
| + // found block. That other thread may complete it first or it |
| + // may have crashed. Be fail-safe. |
| + subtle::Release_CompareAndSwap(&shared_meta_->tailptr, tail, next); |
| + } |
| + } |
| + } |
| + |
| + // Block has been enqueued. Now update the tail-pointer past it. This |
| + // could fail if another thread has already completed the operation as |
| + // part of being fail-safe. |
| + subtle::Release_CompareAndSwap(&shared_meta_->tailptr, tail, offset); |
| +} |
| + |
| +void SharedMemoryAllocator::CreateIterator(Iterator* state) { |
| + state->last = OFFSET_QUEUE; |
| + state->loop_detector = OFFSET_QUEUE; |
| +} |
| + |
| +int32_t SharedMemoryAllocator::GetNextIterable(Iterator* state, int32_t* type) { |
| + const BlockHeader* block = GetBlock(state->last, 0, 0, true); |
| + if (!block) // invalid iterator state |
| + return OFFSET_NULL; |
| + int32_t next = subtle::NoBarrier_Load(&block->next); |
| + block = GetBlock(next, 0, 0, false); |
| + if (!block) // no next allocation in queue |
| + return OFFSET_NULL; |
| + if (next == state->loop_detector) { |
| + SetCorrupted(); |
| + return OFFSET_NULL; |
| + } |
| + |
| + state->last = next; |
| + *type = block->type; |
| + |
| + // Memory corruption could cause a loop in the list. We need to detect |
| + // that so as to not cause an infinite loop in the caller. This is done |
| + // by having a second pointer that double-increments through the list. |
| + // If it ever comes around to match "last" then we have a loop and need |
| + // to stop iterating. It's possible to not iterate through all items and |
| + // it's possible to loop multiple times before the loop is detected but at |
| + // least it stops. |
| + if (state->loop_detector == OFFSET_QUEUE) |
|
Dmitry Vyukov
2015/11/03 14:06:46
There is a simpler way to do it:
count number of
bcwhite
2015/11/03 16:28:19
Great! Though mine worked, it had to change anywa
|
| + state->loop_detector = next; |
| + block = GetBlock(state->loop_detector, 0, 0, false); |
| + if (block) { |
| + state->loop_detector = subtle::NoBarrier_Load(&block->next); |
| + block = GetBlock(state->loop_detector, 0, 0, false); |
| + if (block) |
| + state->loop_detector = subtle::NoBarrier_Load(&block->next); |
| + } |
| + |
| + return next; |
| +} |
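The truncated suggestion above reads like a counting scheme. One plausible reading, offered as a guess rather than the reviewer's actual proposal (the niter field is hypothetical and not in this patch): every block occupies at least sizeof(BlockHeader) bytes below freeptr, so the queue can never hold more than freeptr / sizeof(BlockHeader) nodes, and iterating past that bound proves the list contains a cycle.

    // Hypothetical replacement for the double-increment loop detector above.
    // 'niter' would be a new counter on Iterator, reset by CreateIterator.
    int32_t max_blocks = subtle::NoBarrier_Load(&shared_meta_->freeptr) /
                         static_cast<int32_t>(sizeof(BlockHeader));
    if (++state->niter > max_blocks) {
      SetCorrupted();  // More steps than blocks can exist: must be a loop.
      return OFFSET_NULL;
    }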
| + |
| +void SharedMemoryAllocator::SetCorrupted() { |
| + LOG(ERROR) << "Corruption detected in shared-memory segment."; |
| + subtle::NoBarrier_Store(&corrupted_, 1); |
|
Alexander Potapenko
2015/11/03 08:12:54
Why do you need both corrupted_ and kFlagCorrupted
bcwhite
2015/11/03 16:28:19
The shared flag can't be trusted since a malicious
|
| + SetFlag(&shared_meta_->flags, kFlagCorrupted, true); |
| +} |
| + |
| +bool SharedMemoryAllocator::IsCorrupted() { |
| + if (subtle::NoBarrier_Load(&corrupted_) || |
| + CheckFlag(&shared_meta_->flags, kFlagCorrupted)) { |
| + SetCorrupted(); // Make sure all indicators are set. |
| + return true; |
| + } |
| + return false; |
| +} |
| + |
| +bool SharedMemoryAllocator::IsFull() { |
| + return CheckFlag(&shared_meta_->flags, kFlagFull); |
| +} |
| + |
| +// Dereference a block |offset| and ensure that it's valid for the desired |
| +// |type| and |size|. |special| indicates that we may try to access block |
| +// headers not available to callers but still accessed by this module. By |
| +// having internal dereferences go through this same function, the allocator |
| +// is hardened against corruption. |
| +SharedMemoryAllocator::BlockHeader* SharedMemoryAllocator::GetBlock( |
| + int32_t offset, int32_t type, int32_t size, bool special) { |
| + // Validation of parameters. |
| + if (offset % kAllocAlignment != 0) |
| + return nullptr; |
| + if (offset < (int)(special ? OFFSET_QUEUE : sizeof(SharedMetadata))) |
| + return nullptr; |
| + size += sizeof(BlockHeader); |
| + if (offset + size > mem_size_) |
| + return nullptr; |
| + int32_t freeptr = subtle::NoBarrier_Load(&shared_meta_->freeptr); |
| + if (offset + size > freeptr) |
| + return nullptr; |
| + |
| + // Validation of referenced block-header. |
| + const BlockHeader* block = reinterpret_cast<BlockHeader*>(mem_base_ + offset); |
| + if (block->size < size) |
| + return nullptr; |
| + if (!special && block->cookie != kBlockCookieAllocated) |
| + return nullptr; |
| + if (type != 0 && block->type != type) |
| + return nullptr; |
| + |
| + // Return pointer to block data. |
| + return reinterpret_cast<BlockHeader*>(mem_base_ + offset); |
| +} |
| + |
| +void* SharedMemoryAllocator::GetBlockData(int32_t offset, int32_t type, |
| + int32_t size, bool special) { |
| + DCHECK(size > 0); |
| + BlockHeader* block = GetBlock(offset, type, size, special); |
| + if (!block) |
| + return nullptr; |
| + return reinterpret_cast<char*>(block) + sizeof(BlockHeader); |
| +} |
| + |
| +} // namespace base |
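For anyone wanting to exercise the API end to end, a minimal usage sketch. It assumes the declarations in shared_memory_allocator.h match their use in this file (Iterator, Allocate, MakeIterable, CreateIterator, GetNextIterable); providing zeroed, aligned memory is the caller's job per the DCHECKs in the constructor.

    #include <stdint.h>

    #include "base/memory/shared_memory_allocator.h"

    int main() {
      // 64 KiB segment, no paging (page_size == 0 means one page). Static
      // storage is zero-initialized, satisfying the all-zeros precondition.
      alignas(8) static char buffer[64 << 10];
      base::SharedMemoryAllocator allocator(buffer, sizeof(buffer), 0);

      // Allocate a 100-byte block tagged with caller-defined type 42.
      int32_t offset = allocator.Allocate(100, 42);
      if (offset)
        allocator.MakeIterable(offset);  // publish it to iterators

      // Walk every block that has been made iterable.
      base::SharedMemoryAllocator::Iterator iter;
      allocator.CreateIterator(&iter);
      int32_t type;
      while (int32_t found = allocator.GetNextIterable(&iter, &type)) {
        // 'found' is the block's offset; 'type' is the tag given to Allocate.
      }
      return 0;
    }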