Chromium Code Reviews

Side by Side Diff: base/memory/shared_memory_allocator.cc

Issue 1410213004: Create "persistent memory allocator" for persisting and sharing objects. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: fixed compile problems; added paging test Created 5 years, 1 month ago
1 // Copyright (c) 2015 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "base/memory/shared_memory_allocator.h"
6
7 #include <assert.h>
8
9 #include "base/atomicops.h"
10 #include "base/logging.h"
11
12 // All allocations and data-structures must be aligned to this byte boundary.
13 #define ALLOC_ALIGNMENT 16
14
16 // The equivalent of NULL for an offset.
16 #define OFFSET_NULL 0
17
18 namespace {
19
20 // A constant (random) value placed in the shared meta-data to identify
21 // an already initialized memory segment.
22 const int32 GLOBAL_COOKIE = 0x408305DC;
23
24 // The current version of the meta-data. If updates are made that change
25 // the meta-data, the version number can be queried to operate in a backward-
26 // compatible manner until the memory segment is completely re-initialized.
27 const int32 GLOBAL_VERSION = 1;
28
29 // Constant values placed in the block headers to indicate their state.
30 const int32 BLOCK_COOKIE_FREE = 0;
31 const int32 BLOCK_COOKIE_QUEUE = 1;
32 const int32 BLOCK_COOKIE_WASTED = -1;
33 const int32 BLOCK_COOKIE_ALLOCATED = 0xC8799269;
34
35 } // namespace
36
37 namespace base {
38
39 struct SharedMemoryAllocator::BlockHeader {
40 int32 size; // number of bytes in this block, including header
41 int32 cookie; // constant value indicating completed allocation
42 int32 type; // a number provided by caller indicating data type
43 subtle::Atomic32 next; // pointer to the next block when iterating
44 };
45
46 struct SharedMemoryAllocator::SharedMetaData {
47 int32 cookie; // some value that indicates complete initialization
48 int32 size; // total size of memory segment
49 int32 version; // version code so upgrades don't break
50 subtle::Atomic32 freeptr; // offset to first free space in the segment
51 int32 reserved[2];
52 char corrupted; // flag indicating that corruption has been detected
53 char full; // flag indicating that alloc failed because segment is full
54 char flags[2]; // align to next int (not strictly needed but avoids confusion)
55
56 // The "iterable" queue is an M&S Queue as described here, append-only:
57 // https://www.research.ibm.com/people/m/michael/podc-1996.pdf
58 subtle::Atomic32 tailptr; // last block available for iteration
59 BlockHeader queue; // empty block for linked-list head/tail (must be last)
60 };
61
62 // The "queue" block header is used to detect "last node" so that zero/null
63 // can be used to indicate that it hasn't been added at all. It is part of
64 // the SharedMetaData structure which itself is always located at offset zero.
65 #define OFFSET_QUEUE offsetof(SharedMetaData, queue)
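
For orientation, a sketch of the resulting segment layout and of how the atomic `next` offsets thread the iterable blocks together (offsets here are illustrative, not computed):

    //  offset 0                          freeptr              mem_size_
    //  +--------------------------------+---------+---------+--.......--+
    //  | SharedMetaData (ends in queue) | block A | block B |   free    |
    //  +--------------------------------+---------+---------+--.......--+
    //
    //  queue.next -> A,  A.next -> B,  B.next == OFFSET_QUEUE (tail)
    //  next == 0 means the block was allocated but never made iterable.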
66
67 SharedMemoryAllocator::SharedMemoryAllocator(void* base, int32 size,
68 int32 page)
69 : shared_meta_(static_cast<SharedMetaData*>(base)),
70 mem_base_(static_cast<char*>(base)),
71 mem_size_(size),
72 mem_page_(page ? page : size),
73 last_seen_(0),
74 corrupted_(false) {
75 static_assert(sizeof(BlockHeader) % ALLOC_ALIGNMENT == 0,
76 "BlockHeader is not a multiple of ALLOC_ALIGNMENT");
77 static_assert(sizeof(SharedMetaData) % ALLOC_ALIGNMENT == 0,
78 "SharedMetaData is not a multiple of ALLOC_ALIGNMENT");
79
80 DCHECK(base && reinterpret_cast<uintptr_t>(base) % ALLOC_ALIGNMENT == 0);
81 DCHECK(size >= 1 << 10 && size <= 1 << 20 && // 1 KiB <= size <= 1 MiB
82 size % ALLOC_ALIGNMENT == 0);
83 DCHECK(page >= 0 && (page == 0 || size % page == 0));
84
85 if (shared_meta_->cookie != GLOBAL_COOKIE) {
86 // This block is only executed when a completely new memory segment is
87 // being initialized. It's unshared and single-threaded...
88 const BlockHeader* first_block = reinterpret_cast<BlockHeader*>(
89 mem_base_ + sizeof(SharedMetaData));
90 if (shared_meta_->cookie != 0 ||
91 shared_meta_->size != 0 ||
92 shared_meta_->version != 0 ||
93 shared_meta_->freeptr != 0 ||
94 shared_meta_->corrupted != 0 ||
95 shared_meta_->full != 0 ||
96 shared_meta_->tailptr != 0 ||
97 shared_meta_->queue.cookie != 0 ||
98 shared_meta_->queue.next != 0 ||
99 first_block->size != 0 ||
100 first_block->cookie != 0 ||
101 first_block->type != 0 ||
102 first_block->next != 0) {
103 // ...or something malicious has been playing with the meta-data.
104 SetCorrupted();
105 }
106
107 // This is still safe to do even if corruption has been detected.
108 shared_meta_->cookie = GLOBAL_COOKIE;
109 shared_meta_->size = size;
110 shared_meta_->version = GLOBAL_VERSION;
111 subtle::NoBarrier_Store(&shared_meta_->freeptr, sizeof(SharedMetaData));
112
113 // Set up the queue of iterable allocations.
114 shared_meta_->queue.size = sizeof(BlockHeader);
115 shared_meta_->queue.cookie = BLOCK_COOKIE_QUEUE;
116 subtle::NoBarrier_Store(&shared_meta_->queue.next, OFFSET_QUEUE);
117 subtle::NoBarrier_Store(&shared_meta_->tailptr, OFFSET_QUEUE);
118 }
119 }
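
A minimal sketch of how a caller might construct one of these on top of base::SharedMemory; the patch itself doesn't prescribe the backing store, and the 64 KiB segment size and 4 KiB page size are arbitrary example values:

    #include "base/logging.h"
    #include "base/memory/shared_memory.h"
    #include "base/memory/shared_memory_allocator.h"

    // Create a zero-filled anonymous segment and wrap it in the allocator.
    // The segment size must be a multiple of the page size.
    base::SharedMemory shm;
    CHECK(shm.CreateAndMapAnonymous(1 << 16));  // 64 KiB
    base::SharedMemoryAllocator allocator(shm.memory(), 1 << 16, 4096);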
120
121 SharedMemoryAllocator::~SharedMemoryAllocator() {
122 }
123
124 int32 SharedMemoryAllocator::Allocate(int32 size, int32 type) {
125 if (size < 0) {
126 NOTREACHED();
127 return OFFSET_NULL;
128 }
129
130 // Round up the requested size, plus header, to the next allocation alignment.
131 size += sizeof(BlockHeader);
132 size = (size + (ALLOC_ALIGNMENT - 1)) & ~(ALLOC_ALIGNMENT - 1);
133 if (size > mem_page_)
134 return OFFSET_NULL;
135
136 // Allocation is lockless, so we do all our calculation and then, if the
137 // final store indicates that something changed while we were working,
138 // scrap everything and start over.
139 for (;;) {
140 if (IsCorrupted())
141 return OFFSET_NULL;
142
143 int32 freeptr = subtle::Acquire_Load(&shared_meta_->freeptr);
144 if (freeptr + size > mem_size_) {
145 shared_meta_->full = true;
146 return OFFSET_NULL;
147 }
148 BlockHeader* block = GetBlock(freeptr, 0, 0, true);
149 if (!block) {
150 SetCorrupted();
151 return OFFSET_NULL;
152 }
153
154 // An allocation cannot cross page boundaries. If it would, create a
155 // "wasted" block and begin again at the top of the next page.
156 int32 page_free = mem_page_ - freeptr % mem_page_;
157 if (size > page_free) {
158 int32 new_freeptr = freeptr + page_free;
159 if (subtle::Release_CompareAndSwap(
160 &shared_meta_->freeptr, freeptr, new_freeptr) == freeptr) {
161 block->size = page_free;
162 block->cookie = BLOCK_COOKIE_WASTED;
163 }
164 continue;
165 }
166
167 // Don't leave a slice at the end of a page too small for anything.
168 if (page_free - size < (int)(sizeof(BlockHeader) + ALLOC_ALIGNMENT))
169 size = page_free;
170
171 int32 new_freeptr = freeptr + size;
172 if (new_freeptr > mem_size_) {
173 SetCorrupted();
174 return OFFSET_NULL;
175 }
176
177 if (subtle::Release_CompareAndSwap(
178 &shared_meta_->freeptr, freeptr, new_freeptr) != freeptr) {
179 // Another thread must have completed an allocation while we were working.
180 // Try again.
181 continue;
182 }
183
184 // Since allocating a block is atomic and all unallocated memory must be
185 // zeros, any other value indicates that something has run amok.
186 if (block->size != 0 ||
187 block->cookie != BLOCK_COOKIE_FREE ||
188 block->type != 0 ||
189 subtle::NoBarrier_Load(&block->next) != 0) {
190 SetCorrupted();
191 return OFFSET_NULL;
192 }
193
194 block->size = size;
195 block->cookie = BLOCK_COOKIE_ALLOCATED;
196 block->type = type;
197 return freeptr;
198 }
199 }
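
A sketch of how a client might combine Allocate(), GetBlockData(), and MakeIterable(); MyRecord and kMyType are hypothetical names, and this assumes GetBlockData() is callable by clients in this patch set:

    struct MyRecord {
      int32 value;
      int32 extra[3];  // arbitrary example payload
    };

    int32 StoreRecord(base::SharedMemoryAllocator* allocator, int32 value) {
      const int32 kMyType = 42;  // caller-defined type tag
      int32 offset = allocator->Allocate(sizeof(MyRecord), kMyType);
      if (!offset)  // OFFSET_NULL (0): full, corrupted, or size too large
        return 0;
      MyRecord* record = static_cast<MyRecord*>(
          allocator->GetBlockData(offset, kMyType, sizeof(MyRecord), false));
      record->value = value;
      allocator->MakeIterable(offset);  // publish to the iteration queue
      return offset;
    }

Returning the offset rather than a pointer is deliberate: offsets stay valid in every process that maps the segment, while raw pointers do not.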
200
201 void SharedMemoryAllocator::GetMemoryInfo(MemoryInfo* meminfo) {
202 int32 remaining = mem_size_ - subtle::NoBarrier_Load(&shared_meta_->freeptr);
203 meminfo->total = mem_size_;
204 meminfo->free = shared_meta_->corrupted ? 0 : remaining - sizeof(BlockHeader);
205 }
206
207 void SharedMemoryAllocator::MakeIterable(int32 offset) {
208 if (IsCorrupted())
209 return;
210 BlockHeader* block = GetBlock(offset, 0, 0, false);
211 if (!block) // invalid offset
212 return;
213 if (subtle::NoBarrier_Load(&block->next) != 0) // previously set iterable
214 return;
215 subtle::NoBarrier_Store(&block->next, OFFSET_QUEUE); // will be tail block
216
217 // Try to add this block to the tail of the queue. May take multiple tries.
218 int32 tail;
219 for (;;) {
220 tail = subtle::Acquire_Load(&shared_meta_->tailptr);
221 block = GetBlock(tail, 0, 0, true);
222 if (!block) {
223 SetCorrupted();
224 return;
225 }
226 int32 next = subtle::NoBarrier_Load(&block->next);
227
228 // Ensure that the tail pointer didn't change while reading next.
229 if (tail == subtle::Release_Load(&shared_meta_->tailptr)) {
230 // Check if the found block is truly the last in the queue (i.e. it
231 // points back to the "queue" node).
232 if (next == OFFSET_QUEUE) {
233 // Yes. Try to append the passed block after the current tail block.
234 if (subtle::Release_CompareAndSwap(
235 &block->next, OFFSET_QUEUE, offset) == OFFSET_QUEUE) {
236 // Success! The block is enqueued; need to update the tail pointer.
237 break;
238 }
239 } else {
240 // No. Another thread was interrupted between the block-next update
241 // and the tail-pointer update. Try to advance tailptr past the
242 // found block; that other thread may complete the update first or
243 // may have crashed. Be fail-safe.
244 subtle::Release_CompareAndSwap(&shared_meta_->tailptr, tail, next);
245 }
246 }
247 }
248
249 // Block has been enqueued. Now update the tail-pointer past it. This
250 // could fail if another thread has already completed the operation as
251 // part of being fail-safe.
252 subtle::Release_CompareAndSwap(&shared_meta_->tailptr, tail, offset);
253 }
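
In summary, the loop above performs the two-step Michael & Scott enqueue:

    // Step 1: CAS tail_block->next: OFFSET_QUEUE -> offset  (link the block)
    // Step 2: CAS shared_meta_->tailptr: tail -> offset     (advance the tail)
    //
    // A thread that observes step 1 done but step 2 pending (next !=
    // OFFSET_QUEUE) advances tailptr itself, so a writer that crashes
    // between the two steps cannot permanently wedge the queue.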
254
255 int32 SharedMemoryAllocator::GetFirstIterable(Iterator* state, int32* type) {
256 state->last = OFFSET_QUEUE;
257 return GetNextIterable(state, type);
258 }
259
260 int32 SharedMemoryAllocator::GetNextIterable(Iterator* state, int32* type) {
261 const BlockHeader* block = GetBlock(state->last, 0, 0, true);
262 if (!block) // invalid iterator state
263 return OFFSET_NULL;
264 int32 next = subtle::NoBarrier_Load(&block->next);
265 block = GetBlock(next, 0, 0, false);
266 if (!block) // no next allocation in queue
267 return OFFSET_NULL;
268
269 state->last = next;
270 *type = block->type;
271 return next;
272 }
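
A matching read-side sketch that walks every iterable allocation with the two calls above (the `allocator` object is assumed from the earlier construction example):

    base::SharedMemoryAllocator::Iterator iter;
    int32 type = 0;
    for (int32 offset = allocator.GetFirstIterable(&iter, &type);
         offset != 0;  // OFFSET_NULL (0) ends the walk
         offset = allocator.GetNextIterable(&iter, &type)) {
      // |offset| names an iterable block; dispatch on the caller's |type|.
    }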
273
274 void SharedMemoryAllocator::SetCorrupted() {
275 LOG(ERROR) << "Corruption detected in shared-memory segment.";
276 corrupted_ = true;
Alexander Potapenko 2015/10/30 06:53:13 Can SetCorrupted() and IsCorrupted() be invoked concurrently? If so, this looks like a data race.
bcwhite 2015/10/30 14:01:09 It is, but not an important one. Correct operation…
277 shared_meta_->corrupted = true;
278 }
279
280 bool SharedMemoryAllocator::IsCorrupted() {
281 if (corrupted_ || shared_meta_->corrupted) {
282 SetCorrupted(); // Make sure all indicators are set.
283 return true;
284 }
285 return false;
286 }
287
288 bool SharedMemoryAllocator::IsFull() {
289 return shared_meta_->full != 0;
290 }
291
292 SharedMemoryAllocator::BlockHeader* SharedMemoryAllocator::GetBlock(
293 int32 offset, int32 type, int32 size, bool special) {
294 // Validation of parameters.
295 if (offset % ALLOC_ALIGNMENT != 0)
296 return nullptr;
297 if (offset < (int)(special ? OFFSET_QUEUE : sizeof(SharedMetaData)))
298 return nullptr;
299 size += sizeof(BlockHeader);
300 if (offset + size > mem_size_)
301 return nullptr;
302 int32 freeptr = subtle::NoBarrier_Load(&shared_meta_->freeptr);
303 if (offset + size > freeptr + (int)(special ? sizeof(BlockHeader) : 0))
304 return nullptr;
305
306 // Validation of referenced block-header.
307 const BlockHeader* block = reinterpret_cast<BlockHeader*>(mem_base_ + offset);
308 if (offset != freeptr && block->size < size)
309 return nullptr;
310 if (!special && block->cookie != BLOCK_COOKIE_ALLOCATED)
311 return nullptr;
312 if (type != 0 && block->type != type)
313 return nullptr;
314
315 // Return pointer to block data.
316 return reinterpret_cast<BlockHeader*>(mem_base_ + offset);
317 }
318
319 void* SharedMemoryAllocator::GetBlockData(int32 offset, int32 type,
320 int32 size, bool special) {
321 DCHECK(size > 0);
322 BlockHeader* block = GetBlock(offset, type, size, special);
323 if (!block)
324 return nullptr;
325 return reinterpret_cast<char*>(block) + sizeof(BlockHeader);
326 }
327
328 } // namespace base