Index: base/memory/shared_memory_allocator.cc |
diff --git a/base/memory/shared_memory_allocator.cc b/base/memory/shared_memory_allocator.cc |
new file mode 100644 |
index 0000000000000000000000000000000000000000..670db66da47d9e60be1af25e681dc7ea43a42c6c |
--- /dev/null |
+++ b/base/memory/shared_memory_allocator.cc |
@@ -0,0 +1,424 @@ |
+// Copyright (c) 2015 The Chromium Authors. All rights reserved. |
+// Use of this source code is governed by a BSD-style license that can be |
+// found in the LICENSE file. |
+ |
+#include "base/memory/shared_memory_allocator.h" |
+ |
+#include <assert.h> |
+#include <algorithm> |
+ |
+#include "base/logging.h" |
+ |
+// All integer constants in this file are signed because Atomic32 is signed |
+// and keeping all others consistent with this avoids a lot of unnecessary |
+// casting just to avoid signed/unsigned compiler errors. This means an |
+// occasional cast of a constant from sizeof() to "int", but that is far |
+// simpler than the alternative. |
+ |
+namespace { |
+ |
+// All allocations and data-structures must be aligned to this byte boundary. |
+// Alignment as large as the physical bus between CPU and RAM is _required_ |
+// for some architectures, is simply more efficient on other CPUs, and |
+// generally a Good Idea(tm) for all platforms as it reduces/eliminates the |
+// chance that a type will span cache lines. Alignment mustn't be less |
+// than 8 to ensure proper alignment for all types. The rest is a balance |
+// between reducing spans across multiple cache lines and wasted space spent |
+// padding out allocations. An alignment of 16 would ensure that the block |
+// header structure always sits in a single cache line. An average of about |
+// 1/2 this value will be wasted with every allocation. |
+const int32_t kAllocAlignment = 8; |
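As a worked example of the alignment arithmetic used later in Allocate() (an editorial illustration, not part of the patch): a request is rounded up to the next multiple of kAllocAlignment with

    size = (size + (kAllocAlignment - 1)) & ~(kAllocAlignment - 1);

so with kAllocAlignment = 8 a 13-byte request becomes 16 while a 16-byte request stays 16; on average roughly kAllocAlignment/2 bytes per allocation are lost to this padding.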
+ |
+// A constant (random) value placed in the shared metadata to identify |
+// an already initialized memory segment. |
+const int32_t kGlobalCookie = 0x408305DC; |
+ |
+// The current version of the metadata. If updates are made that change |
+// the metadata, the version number can be queried to operate in a backward- |
+// compatible manner until the memory segment is completely re-initialized. |
+const int32_t kGlobalVersion = 1; |
+ |
+// Constant values placed in the block headers to indicate its state. |
+const int32_t kBlockCookieFree = 0; |
+const int32_t kBlockCookieQueue = 1; |
+const int32_t kBlockCookieWasted = -1; |
+const int32_t kBlockCookieAllocated = 0xC8799269; |
+ |
+// TODO(bcwhite): When acceptable, consider moving flags to std::atomic<char> |
+// types rather than combined bitfield. |
+ |
+enum { |
+ kFlagCorrupted, |
+ kFlagFull |
+}; |
+ |
+bool CheckFlag(base::subtle::Atomic32* flags, int flag) { |
+ base::subtle::Atomic32 loaded_flags = base::subtle::Acquire_Load(flags); |
+ return (loaded_flags & 1 << flag) != 0; |
+} |
+ |
+void SetFlag(base::subtle::Atomic32* flags, int flag) { |
+ for (;;) { |
+ base::subtle::Atomic32 loaded_flags = base::subtle::Acquire_Load(flags); |
+ base::subtle::Atomic32 new_flags = |
+ (loaded_flags & ~(1 << flag)) | (1 << flag); |
+ if (base::subtle::Release_CompareAndSwap( |
+ flags, loaded_flags, new_flags) == loaded_flags) { |
+ break; |
+ } |
+ } |
+} |
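The TODO above suggests eventually moving these flags to std::atomic types. For readers unfamiliar with the base::subtle API, a minimal sketch of the same helpers written against std::atomic<int32_t> (an illustration only, assuming <atomic>; it is not what this patch uses):

    bool CheckFlag(const std::atomic<int32_t>& flags, int flag) {
      return (flags.load(std::memory_order_acquire) & (1 << flag)) != 0;
    }

    void SetFlag(std::atomic<int32_t>& flags, int flag) {
      // fetch_or sets the bit atomically, so no CAS retry loop is needed.
      flags.fetch_or(1 << flag, std::memory_order_release);
    }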
+ |
+} // namespace |
+ |
+namespace base { |
+ |
+// The block-header is placed at the top of every allocation within the |
+// segment to describe the data that follows it. |
+struct SharedMemoryAllocator::BlockHeader { |
+ int32_t size; // Number of bytes in this block, including header. |
+ int32_t cookie; // Constant value indicating completed allocation. |
+ int32_t type_id; // A number provided by caller indicating data type. |
+ subtle::Atomic32 next; // Pointer to the next block when iterating. |
+}; |
+ |
+// The shared metadata exists once at the top of the memory segment to |
+// describe the state of the allocator to all processes. |
+struct SharedMemoryAllocator::SharedMetadata { |
+ int32_t cookie; // Some value that indicates complete initialization. |
+ int32_t size; // Total size of memory segment. |
+ int32_t page_size; // Paging size within memory segment. |
+ int32_t version; // Version code so upgrades don't break. |
+ subtle::Atomic32 freeptr; // Offset to first free space in the segment. |
+ subtle::Atomic32 flags; // Bitfield of information flags. |
+ int32_t reserved; // Padding to ensure size is multiple of alignment. |
+ |
+ // The "iterable" queue is an M&S Queue as described here, append-only: |
+ // https://www.research.ibm.com/people/m/michael/podc-1996.pdf |
+ subtle::Atomic32 tailptr; // Last block available for iteration. |
+ BlockHeader queue; // Empty block for linked-list head/tail. (must be last) |
+}; |
+ |
+// The "queue" block header is used to detect "last node" so that zero/null |
+// can be used to indicate that it hasn't been added at all. It is part of |
+// the SharedMetadata structure which itself is always located at offset zero. |
+// This can't be a constant because SharedMetadata is a private definition. |
+#define OFFSET_QUEUE offsetof(SharedMetadata, queue) |
+#define OFFSET_NULL 0 // the equivalent NULL value for an offset |
+ |
+SharedMemoryAllocator::SharedMemoryAllocator(void* base, |
+ int32_t size, |
+ int32_t page_size) |
+ : shared_meta_(static_cast<SharedMetadata*>(base)), |
+ mem_base_(static_cast<char*>(base)), |
+ mem_size_(size), |
+ mem_page_(page_size ? page_size : size), |
+ corrupted_(0) { |
+ static_assert(sizeof(BlockHeader) % kAllocAlignment == 0, |
+ "BlockHeader is not a multiple of kAllocAlignment"); |
+ static_assert(sizeof(SharedMetadata) % kAllocAlignment == 0, |
+ "SharedMetadata is not a multiple of kAllocAlignment"); |
+ |
+ CHECK(base && reinterpret_cast<uintptr_t>(base) % kAllocAlignment == 0); |
+ CHECK(size >= 1 << 10 && size <= 1 << 20 && // 1 KiB <= size <= 1 MiB |
+ size % kAllocAlignment == 0); |
+ CHECK(page_size >= 0 && (page_size == 0 || size % page_size == 0)); |
+ |
+ if (shared_meta_->cookie != kGlobalCookie) { |
+ // This block is only executed when a completely new memory segment is |
+ // being initialized. It's unshared and single-threaded... |
+ const BlockHeader* first_block = reinterpret_cast<BlockHeader*>( |
+ mem_base_ + sizeof(SharedMetadata)); |
+ if (shared_meta_->cookie != 0 || |
+ shared_meta_->size != 0 || |
+ shared_meta_->version != 0 || |
+ subtle::NoBarrier_Load(&shared_meta_->freeptr) != 0 || |
+ subtle::NoBarrier_Load(&shared_meta_->flags) != 0 || |
+ shared_meta_->tailptr != 0 || |
+ shared_meta_->queue.cookie != 0 || |
+ subtle::NoBarrier_Load(&shared_meta_->queue.next) != 0 || |
+ first_block->size != 0 || |
+ first_block->cookie != 0 || |
+ first_block->type_id != 0 || |
+ first_block->next != 0) { |
+ // ...or something malicious has been playing with the metadata. |
+ NOTREACHED(); |
+ SetCorrupted(); |
+ } |
+ |
+ // This is still safe to do even if corruption has been detected. |
+ shared_meta_->cookie = kGlobalCookie; |
+ shared_meta_->size = size; |
+ shared_meta_->page_size = page_size; |
+ shared_meta_->version = kGlobalVersion; |
+ subtle::NoBarrier_Store(&shared_meta_->freeptr, sizeof(SharedMetadata)); |
+ |
+ // Set up the queue of iterable allocations. |
+ shared_meta_->queue.size = sizeof(BlockHeader); |
+ shared_meta_->queue.cookie = kBlockCookieQueue; |
+ subtle::NoBarrier_Store(&shared_meta_->queue.next, OFFSET_QUEUE); |
+ subtle::NoBarrier_Store(&shared_meta_->tailptr, OFFSET_QUEUE); |
+ } else { |
+ // The allocator is attaching to a previously initialized segment of |
+ // memory. Make sure the embedded data matches what has been passed. |
+ if (shared_meta_->size != size || shared_meta_->page_size != page_size) { |
+ NOTREACHED(); |
+ SetCorrupted(); |
+ } |
+ } |
+} |
+ |
+SharedMemoryAllocator::~SharedMemoryAllocator() {} |
+ |
+int32_t SharedMemoryAllocator::Allocate(int32_t size, int32_t type_id) { |
+ if (size < 0) { |
Dmitry Vyukov (2015/11/04 13:52:29): check that size != 0 as well; in GetNextIterable an…
bcwhite (2015/11/04 17:18:55): Done.
+ NOTREACHED(); |
+ return OFFSET_NULL; |
+ } |
+ |
+ // Round up the requested size, plus header, to the next allocation alignment. |
+ size += sizeof(BlockHeader); |
Dmitry Vyukov (2015/11/04 13:52:29): check for overflow, renderer can pass INT_MAX-1; no…
bcwhite (2015/11/04 17:18:55): Done.
+ size = (size + (kAllocAlignment - 1)) & ~(kAllocAlignment - 1); |
+ if (size > mem_page_) |
Dmitry Vyukov (2015/11/04 13:52:29): check that size <= page_size
+ return OFFSET_NULL; |
+ |
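Per the review comments above (also reject a zero size and guard against integer overflow before the header is added and the value is rounded up), the added check might look roughly like the following; this is a sketch of the requested change, assuming <limits>, not the code that eventually landed:

    if (size <= 0 ||
        size > std::numeric_limits<int32_t>::max() -
                   static_cast<int32_t>(sizeof(BlockHeader)) - kAllocAlignment) {
      NOTREACHED();
      return OFFSET_NULL;
    }

With that bound, adding sizeof(BlockHeader) and rounding up to kAllocAlignment cannot overflow an int32_t.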
+  // Allocation is lockless so we do all our calculation and then, if saving |
+ // indicates a change has occurred since we started, scrap everything and |
+ // start over. |
+ for (;;) { |
+ if (IsCorrupted()) |
+ return OFFSET_NULL; |
+ |
+ int32_t freeptr = subtle::Acquire_Load(&shared_meta_->freeptr); |
Dmitry Vyukov (2015/11/04 13:52:29): What do we acquire here? Where is the pairing rele…
bcwhite (2015/11/04 17:18:55): It's the CAS on line 214 or 234 (only one is execu…
Dmitry Vyukov (2015/11/05 10:49:42): Acquire or release operation that is not paired wi…
bcwhite (2015/11/05 14:37:15): I've been thinking about acquire/release in terms…
Dmitry Vyukov (2015/11/05 16:38:12): Correct.
bcwhite (2015/11/05 17:06:30): Ahhh! So it's not that specific value we're acqui…
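For readers following the acquire/release discussion above: an Acquire_Load only establishes ordering when it observes a value written by a paired Release operation, at which point everything the releasing thread wrote before that release becomes visible. A minimal std::atomic illustration of such a pairing (generic example assuming <atomic> and <cassert>, not code from this patch):

    std::atomic<bool> ready{false};
    int data = 0;

    // Writer thread: plain write, then release-store.
    data = 42;
    ready.store(true, std::memory_order_release);

    // Reader thread: if the acquire-load sees the store, the plain
    // write to |data| is guaranteed to be visible as well.
    if (ready.load(std::memory_order_acquire))
      assert(data == 42);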
+ if (freeptr + size > mem_size_) { |
+ SetFlag(&shared_meta_->flags, kFlagFull); |
+ return OFFSET_NULL; |
+ } |
+ |
+ // Get pointer to the "free" block. It doesn't even have a header; pass |
+    // -sizeof(header) so accounting for that will yield an expected size of |
+ // zero which is what will be stored at that location. If something |
+ // has been allocated since the load of freeptr above, it is still safe |
+ // as nothing will be written to that location until after the CAS below. |
+ BlockHeader* block = GetBlock(freeptr, 0, -(int)sizeof(BlockHeader), true); |
+ if (!block) { |
+ SetCorrupted(); |
+ return OFFSET_NULL; |
+ } |
+ |
+ // An allocation cannot cross page boundaries. If it would, create a |
+ // "wasted" block and begin again at the top of the next page. |
+ int32_t page_free = mem_page_ - freeptr % mem_page_; |
+ if (size > page_free) { |
Dmitry Vyukov (2015/11/04 13:52:29): %K returns value in [0, K), not [1, K]; check for p…
bcwhite (2015/11/04 17:18:55): I want [0, K). If "freeptr" points to the start o…
Dmitry Vyukov (2015/11/04 17:33:07): I may be missing something then. If we get page_fr…
bcwhite (2015/11/04 18:40:16): mem_page_ > 0 therefore freeptr % mem_page_ < mem…
Dmitry Vyukov (2015/11/05 10:49:42): Aha! I missed "mem_page_ - " part. Sorry.
+ int32_t new_freeptr = freeptr + page_free; |
+ if (subtle::Release_CompareAndSwap( |
Dmitry Vyukov (2015/11/04 13:52:29): What do we release here? Where is the pairing acqu…
+ &shared_meta_->freeptr, freeptr, new_freeptr) == freeptr) { |
+ block->size = page_free; |
Dmitry Vyukov (2015/11/04 13:52:29): Why do we need this? We not don't iterate the regi…
bcwhite (2015/11/04 17:18:55): I suppose it could be omitted now. It was part of…
Dmitry Vyukov (2015/11/04 17:33:08): I don't object too much. But then add a comment. F…
bcwhite (2015/11/04 18:40:16): Done.
+ block->cookie = kBlockCookieWasted; |
+ } |
+ continue; |
+ } |
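A worked example of the page-boundary handling above (numbers chosen for illustration only): with mem_page_ = 4096, freeptr = 4000 and an aligned size of 200, page_free = 4096 - (4000 % 4096) = 96. Since 200 > 96, the 96 leftover bytes are recorded as a "wasted" block at offset 4000, freeptr is advanced to 4096, and the loop retries the allocation at the start of the next page.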
+ |
+ // Don't leave a slice at the end of a page too small for anything. This |
+ // can result in an allocation up to two alignment-sizes greater than the |
+ // minimum required by requested-size + header + alignment. |
+ if (page_free - size < (int)(sizeof(BlockHeader) + kAllocAlignment)) |
+ size = page_free; |
+ |
+ int32_t new_freeptr = freeptr + size; |
+ if (new_freeptr > mem_size_) { |
+ SetCorrupted(); |
+ return OFFSET_NULL; |
+ } |
+ |
+ if (subtle::Release_CompareAndSwap( |
Dmitry Vyukov (2015/11/04 13:52:29): What do we release here? Where is the pairing acqu…
+ &shared_meta_->freeptr, freeptr, new_freeptr) != freeptr) { |
+ // Another thread must have completed an allocation while we were working. |
+ // Try again. |
+ continue; |
+ } |
+ |
+ // Given that all memory was zeroed before ever being given to an instance |
+    // of this class and given that we only allocate in a monotonic fashion |
+ // going forward, it must be that the newly allocated block is completely |
+ // full of zeros. If we find anything in the block header that is NOT a |
+ // zero then something must have previously run amuck through memory, |
+ // writing beyond the allocated space and into unallocated space. |
+ if (block->size != 0 || |
+ block->cookie != kBlockCookieFree || |
+ block->type_id != 0 || |
+ subtle::NoBarrier_Load(&block->next) != 0) { |
+ SetCorrupted(); |
+ return OFFSET_NULL; |
+ } |
+ |
+ block->size = size; |
Dmitry Vyukov (2015/11/04 13:52:29): These should be atomic stores as they race with al…
bcwhite (2015/11/04 17:18:55): You mean the checks on lines 247-250?
Dmitry Vyukov (2015/11/04 17:33:08): I mean checks in GetBlock done by another thread w…
+ block->cookie = kBlockCookieAllocated; |
+ block->type_id = type_id; |
+ return freeptr; |
+ } |
+} |
+ |
+void SharedMemoryAllocator::GetMemoryInfo(MemoryInfo* meminfo) { |
+ int32_t remaining = |
+ mem_size_ - subtle::NoBarrier_Load(&shared_meta_->freeptr); |
+ meminfo->total = mem_size_; |
+ meminfo->free = IsCorrupted() ? 0 : remaining - sizeof(BlockHeader); |
+} |
+ |
+void SharedMemoryAllocator::MakeIterable(int32_t offset) { |
+ if (IsCorrupted()) |
+ return; |
+ BlockHeader* block = GetBlock(offset, 0, 0, false); |
+ if (!block) // invalid offset |
+ return; |
+ if (subtle::NoBarrier_Load(&block->next) != 0) // previously set iterable |
+ return; |
+ subtle::NoBarrier_Store(&block->next, OFFSET_QUEUE); // will be tail block |
+ |
+ // Try to add this block to the tail of the queue. May take multiple tries. |
+ int32_t tail; |
+ for (;;) { |
+ tail = subtle::Acquire_Load(&shared_meta_->tailptr); |
+ block = GetBlock(tail, 0, 0, true); |
+ if (!block) { |
+ SetCorrupted(); |
+ return; |
+ } |
+ int32_t next = subtle::NoBarrier_Load(&block->next); |
+ |
+ // Ensure that the tail pointer didn't change while reading next. Only |
+ // the read of the tail pointer is atomic but we need to read both the |
+ // tail pointer and the next pointer from it in an atomic fashion. The |
+ // way to do this is to read both non-atomically and then verify after |
+ // the second read that the first read is still valid/unchanged. |
+ if (tail == subtle::Release_Load(&shared_meta_->tailptr)) { |
Dmitry Vyukov (2015/11/04 13:52:29): Why do we need the atomic read of both fields? nex…
bcwhite (2015/11/04 17:18:55): This is how it is done in the M&S Queue paper -- t…
Dmitry Vyukov (2015/11/04 17:33:08): What will break if we remove the CAS?
bcwhite (2015/11/04 18:40:16): As I understand it... If we remove the block-next…
Dmitry Vyukov (2015/11/05 10:49:42): We need to understand this algorithm well enough t…
bcwhite (2015/11/05 14:37:15): Fair enough. Let's see what happens.
+      // Check if the found block is truly the last in the queue (i.e. it |
+ // points back to the "queue" node). |
+ if (next == OFFSET_QUEUE) { |
+ // Yes. Try to append the passed block after the current tail block. |
+ if (subtle::Release_CompareAndSwap( |
+ &block->next, OFFSET_QUEUE, offset) == OFFSET_QUEUE) { |
+ // Success! The block is enqueued; need to update the tail pointer. |
+ break; |
+ } |
+ } else { |
+ // No. Another thread has stopped between the block-next update |
+ // and the tail-pointer update. Try to update tailptr past the |
+ // found block. That other thread may complete it first or it |
+ // may have crashed. Be fail-safe. |
+ subtle::Release_CompareAndSwap(&shared_meta_->tailptr, tail, next); |
+ } |
+ } |
+ } |
+ |
+ // Block has been enqueued. Now update the tail-pointer past it. This |
+ // could fail if another thread has already completed the operation as |
+ // part of being fail-safe. |
+ subtle::Release_CompareAndSwap(&shared_meta_->tailptr, tail, offset); |
+} |
+ |
+void SharedMemoryAllocator::CreateIterator(Iterator* state) { |
+ state->last = OFFSET_QUEUE; |
+ state->niter = 0; |
+} |
+ |
+int32_t SharedMemoryAllocator::GetNextIterable(Iterator* state, |
+ int32_t* type_id) { |
+ const BlockHeader* block = GetBlock(state->last, 0, 0, true); |
+ if (!block) // invalid iterator state |
+ return OFFSET_NULL; |
+ int32_t next = subtle::NoBarrier_Load(&block->next); |
Dmitry Vyukov (2015/11/04 13:52:28): this needs to be Acquire_Load, this is what acquir…
bcwhite (2015/11/04 17:18:55): Whew! I've added a comment according to my unders…
+ block = GetBlock(next, 0, 0, false); |
+ if (!block) // no next allocation in queue |
+ return OFFSET_NULL; |
+ |
+ // Memory corruption could cause a loop in the list. We need to detect |
+ // that so as to not cause an infinite loop in the caller. We do this |
+ // simply by making sure we don't iterate more than the absolute maximum |
+ // number of allocations that could have been made. Callers are likely |
+ // to loop multiple times before it is detected but at least it stops. |
+ int32_t freeptr = std::min(subtle::Acquire_Load(&shared_meta_->freeptr), |
Dmitry Vyukov (2015/11/04 13:52:29): visibility over what do we acquire here?
bcwhite (2015/11/04 17:18:55): There must be something I don't understand about a…
Dmitry Vyukov (2015/11/05 10:49:42): Just atomic load is NoBarrier_Load. Acquire/Releas…
+ mem_size_); |
+ if (state->niter > freeptr / (sizeof(BlockHeader) + kAllocAlignment)) { |
+ SetCorrupted(); |
+ return OFFSET_NULL; |
+ } |
+ |
+ state->last = next; |
+ state->niter++; |
+ *type_id = block->type_id; |
+ |
+ return next; |
+} |
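A sketch of how a caller might walk the iterable blocks with this API (assuming the accompanying header exposes Iterator, CreateIterator(), GetNextIterable() and GetBlockData() roughly as defined in this file; MyStruct and kMyTypeId are hypothetical caller-side names):

    SharedMemoryAllocator::Iterator iter;
    allocator->CreateIterator(&iter);
    int32_t type_id = 0;
    int32_t offset;
    while ((offset = allocator->GetNextIterable(&iter, &type_id)) != 0) {
      if (type_id != kMyTypeId)  // kMyTypeId: hypothetical constant
        continue;
      MyStruct* data = static_cast<MyStruct*>(
          allocator->GetBlockData(offset, kMyTypeId, sizeof(MyStruct), false));
      if (data) {
        // Read the shared data here.
      }
    }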
+ |
+// The "corrupted" state is held both locally and globally (shared). The |
+// shared flag can't be trusted since a malicious actor could overwrite it. |
+// The local version is immune to foreign actors. Thus, if the flag is seen |
+// set in shared memory, copy it locally and, once set locally, always |
+// restore it globally. |
+void SharedMemoryAllocator::SetCorrupted() { |
+ LOG(ERROR) << "Corruption detected in shared-memory segment."; |
+ subtle::NoBarrier_Store(&corrupted_, 1); |
+ SetFlag(&shared_meta_->flags, kFlagCorrupted); |
+} |
+ |
+bool SharedMemoryAllocator::IsCorrupted() { |
+ if (subtle::NoBarrier_Load(&corrupted_) || |
+ CheckFlag(&shared_meta_->flags, kFlagCorrupted)) { |
+ SetCorrupted(); // Make sure all indicators are set. |
+ return true; |
+ } |
+ return false; |
+} |
+ |
+bool SharedMemoryAllocator::IsFull() { |
+ return CheckFlag(&shared_meta_->flags, kFlagFull); |
+} |
+ |
+// Dereference a block |offset| and ensure that it's valid for the desired |
+// |type_id| and |size|. |special| indicates that we may try to access block |
+// headers not available to callers but still accessed by this module. By |
+// having internal dereferences go through this same function, the allocator |
+// is hardened against corruption. |
+SharedMemoryAllocator::BlockHeader* SharedMemoryAllocator::GetBlock( |
+ int32_t offset, |
+ int32_t type_id, |
+ int32_t size, |
+ bool special) { |
Dmitry Vyukov (2015/11/04 13:52:29): Split special flag into two flags: one allows to g…
bcwhite (2015/11/04 17:18:55): Done.
+ // Validation of parameters. |
+ if (offset % kAllocAlignment != 0) |
+ return nullptr; |
+ if (offset < (int)(special ? OFFSET_QUEUE : sizeof(SharedMetadata))) |
+ return nullptr; |
+ size += sizeof(BlockHeader); |
+ if (offset + size > mem_size_) |
+ return nullptr; |
+ int32_t freeptr = subtle::NoBarrier_Load(&shared_meta_->freeptr); |
+ if (offset + size > freeptr) |
+ return nullptr; |
+ |
+ // Validation of referenced block-header. |
+ const BlockHeader* block = reinterpret_cast<BlockHeader*>(mem_base_ + offset); |
+ if (block->size < size) |
+ return nullptr; |
+ if (!special && block->cookie != kBlockCookieAllocated) |
+ return nullptr; |
+ if (type_id != 0 && block->type_id != type_id) |
+ return nullptr; |
+ |
+ // Return pointer to block data. |
+ return reinterpret_cast<BlockHeader*>(mem_base_ + offset); |
+} |
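The review thread above asks for the single |special| flag to be split in two. A rough sketch of the shape such a signature could take (the parameter names are hypothetical, not taken from the patch):

    // queue_ok: caller may reference the internal "queue" block, which lives
    //           below sizeof(SharedMetadata) at OFFSET_QUEUE.
    // free_ok:  caller may reference a block past the last allocation, i.e.
    //           one whose header has not been written yet.
    BlockHeader* GetBlock(int32_t offset, int32_t type_id, int32_t size,
                          bool queue_ok, bool free_ok);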
+ |
+void* SharedMemoryAllocator::GetBlockData(int32_t offset, |
+ int32_t type_id, |
+ int32_t size, |
+ bool special) { |
+ DCHECK(size > 0); |
+ BlockHeader* block = GetBlock(offset, type_id, size, special); |
+ if (!block) |
+ return nullptr; |
+ return reinterpret_cast<char*>(block) + sizeof(BlockHeader); |
+} |
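Putting the pieces together, a caller-side sketch of the intended allocate/fill/publish flow (again assuming the header exposes Allocate(), GetBlockData() and MakeIterable() as defined here; MyStruct and kMyTypeId are hypothetical):

    int32_t offset = allocator->Allocate(sizeof(MyStruct), kMyTypeId);
    if (offset != 0) {
      MyStruct* data = static_cast<MyStruct*>(allocator->GetBlockData(
          offset, kMyTypeId, sizeof(MyStruct), false));
      if (data) {
        data->value = 1;                  // fill in the block...
        allocator->MakeIterable(offset);  // ...then publish it to iterators
      }
    }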
+ |
+} // namespace base |