Chromium Code Reviews

Side by Side Diff: base/memory/shared_memory_allocator.cc

Issue 1410213004: Create "persistent memory allocator" for persisting and sharing objects. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: some 'git cl format' changes and remove double-space after periods. Created 5 years, 1 month ago
1 // Copyright (c) 2015 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "base/memory/shared_memory_allocator.h"
6
7 #include <assert.h>
8 #include <algorithm>
9
10 #include "base/logging.h"
11
12 // All integer constants in this file are signed because Atomic32 is signed
13 // and keeping the other values consistent with it avoids a lot of casting
14 // that would otherwise be needed just to silence signed/unsigned compiler
15 // warnings. This means an occasional cast of a sizeof() value to "int" but
16 // that is far simpler than the alternative.
17
18 namespace {
19
20 // All allocations and data-structures must be aligned to this byte boundary.
21 // Alignment as large as the physical bus between CPU and RAM is _required_
22 // for some architectures, is simply more efficient on other CPUs, and
23 // generally a Good Idea(tm) for all platforms as it reduces/eliminates the
24 // chance that a type will span cache lines. Alignment mustn't be less
25 // than 8 to ensure proper alignment for all types. The rest is a balance
26 // between reducing spans across multiple cache lines and wasted space spent
27 // padding out allocations. An alignment of 16 would ensure that the block
28 // header structure always sits in a single cache line. An average of about
29 // 1/2 this value will be wasted with every allocation.
30 const int32_t kAllocAlignment = 8;
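// Illustrative sketch (descriptive only, not used by the code): rounding a
// request up to this alignment uses the usual power-of-two mask, exactly as
// Allocate() does further below:
//   int32_t rounded = (size + (kAllocAlignment - 1)) & ~(kAllocAlignment - 1);
//   // e.g. size 13 -> 16, size 16 -> 16, size 17 -> 24.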
31
32 // A constant (random) value placed in the shared metadata to identify
33 // an already initialized memory segment.
34 const int32_t kGlobalCookie = 0x408305DC;
35
36 // The current version of the metadata. If updates are made that change
37 // the metadata, the version number can be queried to operate in a backward-
38 // compatible manner until the memory segment is completely re-initialized.
39 const int32_t kGlobalVersion = 1;
40
41 // Constant values placed in the block headers to indicate a block's state.
42 const int32_t kBlockCookieFree = 0;
43 const int32_t kBlockCookieQueue = 1;
44 const int32_t kBlockCookieWasted = -1;
45 const int32_t kBlockCookieAllocated = 0xC8799269;
46
47 // TODO(bcwhite): When acceptable, consider moving flags to std::atomic<char>
48 // types rather than combined bitfield.
49
50 enum {
51 kFlagCorrupted,
52 kFlagFull
53 };
54
55 bool CheckFlag(base::subtle::Atomic32* flags, int flag) {
56 base::subtle::Atomic32 loaded_flags = base::subtle::Acquire_Load(flags);
57 return (loaded_flags & (1 << flag)) != 0;
58 }
59
60 void SetFlag(base::subtle::Atomic32* flags, int flag) {
61 for (;;) {
62 base::subtle::Atomic32 loaded_flags = base::subtle::Acquire_Load(flags);
63 base::subtle::Atomic32 new_flags =
64 loaded_flags | (1 << flag);
65 if (base::subtle::Release_CompareAndSwap(
66 flags, loaded_flags, new_flags) == loaded_flags) {
67 break;
68 }
69 }
70 }
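// Illustrative usage sketch for the helpers above (hypothetical values, not
// taken from this file): all flags are packed into a single Atomic32 and
// addressed by bit index.
//   base::subtle::Atomic32 flags = 0;
//   SetFlag(&flags, kFlagFull);
//   CheckFlag(&flags, kFlagFull);       // true
//   CheckFlag(&flags, kFlagCorrupted);  // false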
71
72 } // namespace
73
74 namespace base {
75
76 // The block-header is placed at the top of every allocation within the
77 // segment to describe the data that follows it.
78 struct SharedMemoryAllocator::BlockHeader {
79 int32_t size; // Number of bytes in this block, including header.
80 int32_t cookie; // Constant value indicating completed allocation.
81 int32_t type; // A number provided by caller indicating data type.
82 subtle::Atomic32 next; // Pointer to the next block when iterating.
83 };
84
85 // The shared metadata exists once at the top of the memory segment to
86 // describe the state of the allocator to all processes.
87 struct SharedMemoryAllocator::SharedMetadata {
88 int32_t cookie; // Some value that indicates complete initialization.
89 int32_t size; // Total size of memory segment.
90 int32_t page_size; // Paging size within memory segment.
91 int32_t version; // Version code so upgrades don't break.
92 subtle::Atomic32 freeptr; // Offset to first free space in the segment.
93 subtle::Atomic32 flags; // Bitfield of information flags.
94 int32_t reserved; // Padding to ensure size is multiple of alignment.
95
96 // The "iterable" queue is an M&S Queue as described here, append-only:
97 // https://www.research.ibm.com/people/m/michael/podc-1996.pdf
98 subtle::Atomic32 tailptr; // Last block available for iteration.
99 BlockHeader queue; // Empty block for linked-list head/tail. (must be last)
100 };
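// A rough sketch of the resulting segment layout (descriptive only; exact
// offsets are whatever the structure sizes dictate):
//   offset 0:                SharedMetadata (ends with the "queue" block)
//   sizeof(SharedMetadata):  first allocated block (header + caller data)
//   ...                      further blocks, each aligned to kAllocAlignment
//   freeptr:                 start of unallocated space, growing toward size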
101
102 // The "queue" block header is used to detect "last node" so that zero/null
103 // can be used to indicate that it hasn't been added at all. It is part of
104 // the SharedMetadata structure which itself is always located at offset zero.
105 // This can't be a constant because SharedMetadata is a private definition.
106 #define OFFSET_QUEUE offsetof(SharedMetadata, queue)
107 #define OFFSET_NULL 0 // the equivalent of NULL for an offset
108
109 SharedMemoryAllocator::SharedMemoryAllocator(void* base,
110 int32_t size,
111 int32_t page_size)
112 : shared_meta_(static_cast<SharedMetadata*>(base)),
113 mem_base_(static_cast<char*>(base)),
114 mem_size_(size),
115 mem_page_(page_size ? page_size : size),
116 last_seen_(0),
117 corrupted_(0) {
118 static_assert(sizeof(BlockHeader) % kAllocAlignment == 0,
119 "BlockHeader is not a multiple of kAllocAlignment");
120 static_assert(sizeof(SharedMetadata) % kAllocAlignment == 0,
121 "SharedMetadata is not a multiple of kAllocAlignment");
122
123 CHECK(base && reinterpret_cast<uintptr_t>(base) % kAllocAlignment == 0);
124 CHECK(size >= 1 << 10 && size <= 1 << 20 && // 1 KiB <= size <= 1 MiB
125 size % kAllocAlignment == 0);
126 CHECK(page_size >= 0 && (page_size == 0 || size % page_size == 0));
127
128 if (shared_meta_->cookie != kGlobalCookie) {
129 // This block is only executed when a completely new memory segment is
130 // being initialized. It's unshared and single-threaded...
131 const BlockHeader* first_block = reinterpret_cast<BlockHeader*>(
132 mem_base_ + sizeof(SharedMetadata));
133 if (shared_meta_->cookie != 0 ||
134 shared_meta_->size != 0 ||
135 shared_meta_->version != 0 ||
136 subtle::NoBarrier_Load(&shared_meta_->freeptr) != 0 ||
137 subtle::NoBarrier_Load(&shared_meta_->flags) != 0 ||
138 shared_meta_->tailptr != 0 ||
139 shared_meta_->queue.cookie != 0 ||
140 subtle::NoBarrier_Load(&shared_meta_->queue.next) != 0 ||
141 first_block->size != 0 ||
142 first_block->cookie != 0 ||
143 first_block->type != 0 ||
144 first_block->next != 0) {
145 // ...or something malicious has been playing with the metadata.
146 NOTREACHED();
147 SetCorrupted();
148 }
149
150 // This is still safe to do even if corruption has been detected.
151 shared_meta_->cookie = kGlobalCookie;
152 shared_meta_->size = size;
153 shared_meta_->page_size = page_size;
154 shared_meta_->version = kGlobalVersion;
155 subtle::NoBarrier_Store(&shared_meta_->freeptr, sizeof(SharedMetadata));
156
157 // Set up the queue of iterable allocations.
158 shared_meta_->queue.size = sizeof(BlockHeader);
159 shared_meta_->queue.cookie = kBlockCookieQueue;
160 subtle::NoBarrier_Store(&shared_meta_->queue.next, OFFSET_QUEUE);
161 subtle::NoBarrier_Store(&shared_meta_->tailptr, OFFSET_QUEUE);
162 } else {
163 // The allocator is attaching to a previously initialized segment of
164 // memory. Make sure the embedded data matches what has been passed.
165 if (shared_meta_->size != size || shared_meta_->page_size != page_size) {
166 NOTREACHED();
167 SetCorrupted();
168 }
169 }
170 }
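// Illustrative construction sketch (hypothetical caller code, not part of
// this CL). The memory handed in must be zero-filled the first time and must
// satisfy the CHECKs above (aligned base, 1 KiB <= size <= 1 MiB, size a
// multiple of the alignment):
//   const int32_t kSegmentSize = 1 << 16;  // assumed value for the example
//   int64_t* base = new int64_t[kSegmentSize / sizeof(int64_t)]();  // zeroed
//   SharedMemoryAllocator allocator(base, kSegmentSize, 0 /* page_size */);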
171
172 SharedMemoryAllocator::~SharedMemoryAllocator() {}
173
174 int32_t SharedMemoryAllocator::Allocate(int32_t size, int32_t type) {
175 if (size < 0) {
176 NOTREACHED();
177 return OFFSET_NULL;
178 }
179
180 // Round up the requested size, plus header, to the next allocation alignment.
181 size += sizeof(BlockHeader);
182 size = (size + (kAllocAlignment - 1)) & ~(kAllocAlignment - 1);
183 if (size > mem_page_)
184 return OFFSET_NULL;
185
186 // Allocation is lockless so we do all our calculation and then, if the
187 // final compare-and-swap indicates that something changed since we
188 // started, scrap everything and start over.
189 for (;;) {
190 if (IsCorrupted())
191 return OFFSET_NULL;
192
193 int32_t freeptr = subtle::Acquire_Load(&shared_meta_->freeptr);
194 if (freeptr + size > mem_size_) {
195 SetFlag(&shared_meta_->flags, kFlagFull);
196 return OFFSET_NULL;
197 }
198
199 // Get pointer to the "free" block. It doesn't even have a header; pass
200 // -sizeof(header) so that accounting for the header yields an expected
201 // size of zero, which is what is currently stored at that location. If
202 // something has been allocated since the load of freeptr above, it is
203 // still safe as nothing will be written there until after the CAS below.
204 BlockHeader* block = GetBlock(freeptr, 0, -(int)sizeof(BlockHeader), true);
205 if (!block) {
206 SetCorrupted();
207 return OFFSET_NULL;
208 }
209
210 // An allocation cannot cross page boundaries. If it would, create a
211 // "wasted" block and begin again at the top of the next page.
212 int32_t page_free = mem_page_ - freeptr % mem_page_;
213 if (size > page_free) {
214 int32_t new_freeptr = freeptr + page_free;
215 if (subtle::Release_CompareAndSwap(
216 &shared_meta_->freeptr, freeptr, new_freeptr) == freeptr) {
217 block->size = page_free;
218 block->cookie = kBlockCookieWasted;
219 }
220 continue;
221 }
222
223 // Don't leave a slice at the end of a page too small for anything. This
224 // can result in an allocation up to two alignment-sizes greater than the
225 // minimum required by requested-size + header + alignment.
226 if (page_free - size < (int)(sizeof(BlockHeader) + kAllocAlignment))
227 size = page_free;
228
229 int32_t new_freeptr = freeptr + size;
230 if (new_freeptr > mem_size_) {
231 SetCorrupted();
232 return OFFSET_NULL;
233 }
234
235 if (subtle::Release_CompareAndSwap(
236 &shared_meta_->freeptr, freeptr, new_freeptr) != freeptr) {
237 // Another thread must have completed an allocation while we were working.
238 // Try again.
239 continue;
240 }
241
242 // Given that all memory was zeroed before ever being given to an instance
243 // of this class and given that we only allocate in a monotonic fashion
244 // going forward, it must be that the newly allocated block is completely
245 // full of zeros. If we find anything in the block header that is NOT a
246 // zero then something must have previously run amok through memory,
247 // writing beyond the allocated space and into unallocated space.
248 if (block->size != 0 ||
249 block->cookie != kBlockCookieFree ||
250 block->type != 0 ||
251 subtle::NoBarrier_Load(&block->next) != 0) {
252 SetCorrupted();
253 return OFFSET_NULL;
254 }
255
256 block->size = size;
257 block->cookie = kBlockCookieAllocated;
258 block->type = type;
259 return freeptr;
260 }
261 }
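// Illustrative allocation sketch (hypothetical caller code): Allocate()
// returns an offset rather than a pointer so the value is meaningful in any
// process that maps the same segment; a failed allocation returns 0. The
// size and type id below are assumptions for the example.
//   int32_t offset = allocator.Allocate(24 /* bytes */, 42 /* type */);
//   if (offset) {
//     // ... fill in the caller's data for this block ...
//     allocator.MakeIterable(offset);  // expose it to iterators (see below)
//   }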
262
263 void SharedMemoryAllocator::GetMemoryInfo(MemoryInfo* meminfo) {
264 int32_t remaining =
265 mem_size_ - subtle::NoBarrier_Load(&shared_meta_->freeptr);
266 meminfo->total = mem_size_;
267 meminfo->free = IsCorrupted() ? 0 : remaining - sizeof(BlockHeader);
268 }
269
270 void SharedMemoryAllocator::MakeIterable(int32_t offset) {
271 if (IsCorrupted())
272 return;
273 BlockHeader* block = GetBlock(offset, 0, 0, false);
274 if (!block) // invalid offset
275 return;
276 if (subtle::NoBarrier_Load(&block->next) != 0) // previously set iterable
277 return;
278 subtle::NoBarrier_Store(&block->next, OFFSET_QUEUE); // will be tail block
279
280 // Try to add this block to the tail of the queue. May take multiple tries.
281 int32_t tail;
282 for (;;) {
283 tail = subtle::Acquire_Load(&shared_meta_->tailptr);
284 block = GetBlock(tail, 0, 0, true);
285 if (!block) {
286 SetCorrupted();
287 return;
288 }
289 int32_t next = subtle::NoBarrier_Load(&block->next);
290
291 // Ensure that the tail pointer didn't change while reading next. Each
292 // load is individually atomic, but the pair (the tail pointer and the
293 // next pointer of the block it references) must be read consistently.
294 // The way to do that is to do both reads and then verify, after the
295 // second one, that the first value is still valid/unchanged.
296 if (tail == subtle::Release_Load(&shared_meta_->tailptr)) {
297 // Check if the found block is truly the last in the queue (i.e. it
298 // points back to the "queue" node).
299 if (next == OFFSET_QUEUE) {
300 // Yes. Try to append the passed block after the current tail block.
301 if (subtle::Release_CompareAndSwap(
302 &block->next, OFFSET_QUEUE, offset) == OFFSET_QUEUE) {
303 // Success! The block is enqueued; need to update the tail pointer.
304 break;
305 }
306 } else {
307 // No. Another thread is paused (or has crashed) between its
308 // block-next update and its tail-pointer update. Try to advance
309 // tailptr past the found block ourselves; that thread may complete
310 // the update first or may never finish, so be fail-safe.
311 subtle::Release_CompareAndSwap(&shared_meta_->tailptr, tail, next);
312 }
313 }
314 }
315
316 // Block has been enqueued. Now update the tail-pointer past it. This
317 // could fail if another thread has already completed the operation as
318 // part of being fail-safe.
319 subtle::Release_CompareAndSwap(&shared_meta_->tailptr, tail, offset);
320 }
321
322 void SharedMemoryAllocator::CreateIterator(Iterator* state) {
323 state->last = OFFSET_QUEUE;
324 state->niter = 0;
325 }
326
327 int32_t SharedMemoryAllocator::GetNextIterable(Iterator* state, int32_t* type) {
328 const BlockHeader* block = GetBlock(state->last, 0, 0, true);
329 if (!block) // invalid iterator state
330 return OFFSET_NULL;
331 int32_t next = subtle::NoBarrier_Load(&block->next);
332 block = GetBlock(next, 0, 0, false);
333 if (!block) // no next allocation in queue
334 return OFFSET_NULL;
335
336 // Memory corruption could cause a loop in the list. We need to detect
337 // that so as to not cause an infinite loop in the caller. We do this
338 // simply by making sure we don't iterate more than the absolute maximum
339 // number of allocations that could have been made. Callers are likely
340 // to loop multiple times before it is detected but at least it stops.
341 int32_t freeptr = std::min(subtle::Acquire_Load(&shared_meta_->freeptr),
342 mem_size_);
343 if (state->niter > freeptr / (sizeof(BlockHeader) + kAllocAlignment)) {
344 SetCorrupted();
345 return OFFSET_NULL;
346 }
347
348 state->last = next;
349 state->niter++;
350 *type = block->type;
351
352 return next;
353 }
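// Illustrative iteration sketch (hypothetical caller code) using the two
// methods above; GetNextIterable() returns 0 when the queue is exhausted:
//   SharedMemoryAllocator::Iterator iter;
//   allocator.CreateIterator(&iter);
//   int32_t type;
//   while (int32_t offset = allocator.GetNextIterable(&iter, &type)) {
//     // ... look up the block at |offset| according to |type| ...
//   }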
354
355 // The "corrupted" state is held both locally and globally (shared). The
356 // shared flag can't be trusted since a malicious actor could overwrite it.
357 // The local version is immune to foreign actors. Thus, if seen shared,
358 // copy it locally and, once known, always restore it globally.
359 void SharedMemoryAllocator::SetCorrupted() {
360 LOG(ERROR) << "Corruption detected in shared-memory segment.";
361 subtle::NoBarrier_Store(&corrupted_, 1);
362 SetFlag(&shared_meta_->flags, kFlagCorrupted);
363 }
364
365 bool SharedMemoryAllocator::IsCorrupted() {
366 if (subtle::NoBarrier_Load(&corrupted_) ||
367 CheckFlag(&shared_meta_->flags, kFlagCorrupted)) {
368 SetCorrupted(); // Make sure all indicators are set.
369 return true;
370 }
371 return false;
372 }
373
374 bool SharedMemoryAllocator::IsFull() {
375 return CheckFlag(&shared_meta_->flags, kFlagFull);
376 }
377
378 // Dereference a block |offset| and ensure that it's valid for the desired
379 // |type| and |size|. |special| indicates that we may try to access block
380 // headers not available to callers but still accessed by this module. By
381 // having internal dereferences go through this same function, the allocator
382 // is hardened against corruption.
383 SharedMemoryAllocator::BlockHeader* SharedMemoryAllocator::GetBlock(
384 int32_t offset,
385 int32_t type,
386 int32_t size,
387 bool special) {
388 // Validation of parameters.
389 if (offset % kAllocAlignment != 0)
390 return nullptr;
391 if (offset < (int)(special ? OFFSET_QUEUE : sizeof(SharedMetadata)))
392 return nullptr;
393 size += sizeof(BlockHeader);
394 if (offset + size > mem_size_)
395 return nullptr;
396 int32_t freeptr = subtle::NoBarrier_Load(&shared_meta_->freeptr);
397 if (offset + size > freeptr)
398 return nullptr;
399
400 // Validation of referenced block-header.
401 const BlockHeader* block = reinterpret_cast<BlockHeader*>(mem_base_ + offset);
402 if (block->size < size)
403 return nullptr;
404 if (!special && block->cookie != kBlockCookieAllocated)
405 return nullptr;
406 if (type != 0 && block->type != type)
407 return nullptr;
408
409 // Return pointer to block data.
410 return reinterpret_cast<BlockHeader*>(mem_base_ + offset);
411 }
412
413 void* SharedMemoryAllocator::GetBlockData(int32_t offset,
414 int32_t type,
415 int32_t size,
416 bool special) {
417 DCHECK(size > 0);
418 BlockHeader* block = GetBlock(offset, type, size, special);
419 if (!block)
420 return nullptr;
421 return reinterpret_cast<char*>(block) + sizeof(BlockHeader);
422 }
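// Illustrative sketch (hypothetical caller type): GetBlockData() returns the
// caller-usable region that follows the block header, so a typed view is just
// a cast. |MyStruct| and the type id 42 are assumptions for the example, and
// this presumes the accessor is exposed to the caller.
//   MyStruct* data = static_cast<MyStruct*>(
//       allocator.GetBlockData(offset, 42, sizeof(MyStruct), false));
//   if (data) {
//     // ... use the shared object ...
//   }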
423
424 } // namespace base