Chromium Code Reviews

Side by Side Diff: base/memory/shared_memory_allocator.cc

Issue 1410213004: Create "persistent memory allocator" for persisting and sharing objects. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: rebased Created 5 years, 1 month ago
1 // Copyright (c) 2015 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "base/memory/shared_memory_allocator.h"
6
7 #include <assert.h>
8 #include <algorithm>
9
10 #include "base/logging.h"
11
12 // All integer constants in this file are signed because Atomic32 is signed
13 // and keeping all others consistent with this avoids a lot of unnecessary
14 // casting just to avoid signed/unsigned compiler errors.
15 // This means an occasional cast of a constant from sizeof() to "int" but
16 // that is far simpler than the alternative.
17
18 namespace {
19
20 // All allocations and data-structures must be aligned to this byte boundary.
21 // Alignment as large as the physical bus between CPU and RAM is _required_
22 // for some architectures, is simply more efficient on other CPUs, and
23 // generally a Good Idea(tm) for all platforms as it reduces/eliminates the
24 // chance that a type will span cache lines. Alignment mustn't be less
25 // than 8 to ensure proper alignment for all types. The rest is a balance
26 // between reducing spans across multiple cache lines and wasted space spent
27 // padding out allocations. An alignment of 16 would ensure that the block
28 // header structure always sits in a single cache line. An average of about
29 // 1/2 this value will be wasted with every allocation.
30 const int32_t kAllocAlignment = 8;
31
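As a concrete illustration of the rounding this constant drives (see the Allocate() code at line 181 below): with kAllocAlignment == 8 and a 16-byte BlockHeader, a 13-byte request grows to 29 bytes and rounds up to 32. A minimal sketch of the formula, not part of the patch; the constants here are assumptions taken from this file:

    #include <stdint.h>

    // Illustrative helper mirroring the rounding done in Allocate().
    int32_t RoundUpAllocation(int32_t requested) {
      const int32_t kAlignment = 8;    // kAllocAlignment in this patch
      const int32_t kHeaderSize = 16;  // sizeof(BlockHeader) in this patch
      int32_t size = requested + kHeaderSize;
      return (size + (kAlignment - 1)) & ~(kAlignment - 1);
    }
    // RoundUpAllocation(13) == 32; on average about kAlignment/2 bytes of
    // each allocation are padding, as the comment above notes.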
32 // A constant (random) value placed in the shared metadata to identify
33 // an already initialized memory segment.
34 const int32_t kGlobalCookie = 0x408305DC;
35
36 // The current version of the metadata. If updates are made that change
37 // the metadata, the version number can be queried to operate in a backward-
38 // compatible manner until the memory segment is completely re-initialized.
39 const int32_t kGlobalVersion = 1;
40
41 // Constant values placed in the block headers to indicate their state.
42 const int32_t kBlockCookieFree = 0;
43 const int32_t kBlockCookieQueue = 1;
44 const int32_t kBlockCookieWasted = -1;
45 const int32_t kBlockCookieAllocated = 0xC8799269;
46
47 // TODO(bcwhite): When acceptable, consider moving flags to std::atomic<char>
48 // types rather than combined bitfield.
49
50 enum {
51 kFlagCorrupted,
52 kFlagFull
53 };
54
55 bool CheckFlag(base::subtle::Atomic32* flags, int flag) {
56 base::subtle::Atomic32 loaded_flags = base::subtle::Acquire_Load(flags);
57 return (loaded_flags & 1 << flag) != 0;
58 }
59
60 void SetFlag(base::subtle::Atomic32* flags, int flag) {
61 for (;;) {
62 base::subtle::Atomic32 loaded_flags = base::subtle::Acquire_Load(flags);
63 base::subtle::Atomic32 new_flags =
64 (loaded_flags & ~(1 << flag)) | (1 << flag);
65 if (base::subtle::Release_CompareAndSwap(
66 flags, loaded_flags, new_flags) == loaded_flags) {
67 break;
68 }
69 }
70 }
71
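The TODO above contemplates std::atomic; a minimal sketch of what the two flag helpers might look like in that world (illustrative only, not part of this patch):

    #include <atomic>
    #include <stdint.h>

    // Hypothetical std::atomic counterparts of CheckFlag()/SetFlag().
    bool CheckFlagStd(const std::atomic<int32_t>* flags, int flag) {
      return (flags->load(std::memory_order_acquire) & (1 << flag)) != 0;
    }

    void SetFlagStd(std::atomic<int32_t>* flags, int flag) {
      // fetch_or replaces the hand-rolled compare-and-swap loop above.
      flags->fetch_or(1 << flag, std::memory_order_release);
    }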
72 } // namespace
73
74 namespace base {
75
76 // The block-header is placed at the top of every allocation within the
77 // segment to describe the data that follows it.
78 struct SharedMemoryAllocator::BlockHeader {
79 int32_t size; // Number of bytes in this block, including header.
80 int32_t cookie; // Constant value indicating completed allocation.
81 int32_t type_id; // A number provided by caller indicating data type.
82 subtle::Atomic32 next; // Pointer to the next block when iterating.
83 };
84
85 // The shared metadata exists once at the top of the memory segment to
86 // describe the state of the allocator to all processes.
87 struct SharedMemoryAllocator::SharedMetadata {
88 int32_t cookie; // Some value that indicates complete initialization.
89 int32_t size; // Total size of memory segment.
90 int32_t page_size; // Paging size within memory segment.
91 int32_t version; // Version code so upgrades don't break.
92 subtle::Atomic32 freeptr; // Offset to first free space in the segment.
93 subtle::Atomic32 flags; // Bitfield of information flags.
94 int32_t reserved; // Padding to ensure size is multiple of alignment.
95
96 // The "iterable" queue is an M&S Queue as described here, append-only:
97 // https://www.research.ibm.com/people/m/michael/podc-1996.pdf
98 subtle::Atomic32 tailptr; // Last block available for iteration.
99 BlockHeader queue; // Empty block for linked-list head/tail. (must be last)
100 };
101
102 // The "queue" block header is used to detect "last node" so that zero/null
103 // can be used to indicate that it hasn't been added at all. It is part of
104 // the SharedMetadata structure which itself is always located at offset zero.
105 // This can't be a constant because SharedMetadata is a private definition.
106 #define OFFSET_QUEUE offsetof(SharedMetadata, queue)
107 #define OFFSET_NULL 0 // the equivalent of NULL for an offset
108
109 SharedMemoryAllocator::SharedMemoryAllocator(void* base,
110 int32_t size,
111 int32_t page_size)
112 : shared_meta_(static_cast<SharedMetadata*>(base)),
113 mem_base_(static_cast<char*>(base)),
114 mem_size_(size),
115 mem_page_(page_size ? page_size : size),
116 corrupted_(0) {
117 static_assert(sizeof(BlockHeader) % kAllocAlignment == 0,
118 "BlockHeader is not a multiple of kAllocAlignment");
119 static_assert(sizeof(SharedMetadata) % kAllocAlignment == 0,
120 "SharedMetadata is not a multiple of kAllocAlignment");
121
122 CHECK(base && reinterpret_cast<uintptr_t>(base) % kAllocAlignment == 0);
123 CHECK(size >= 1 << 10 && size <= 1 << 20 && // 1 KiB <= size <= 1 MiB
124 size % kAllocAlignment == 0);
125 CHECK(page_size >= 0 && (page_size == 0 || size % page_size == 0));
126
127 if (shared_meta_->cookie != kGlobalCookie) {
128 // This block is only executed when a completely new memory segment is
129 // being initialized. It's unshared and single-threaded...
130 const BlockHeader* first_block = reinterpret_cast<BlockHeader*>(
131 mem_base_ + sizeof(SharedMetadata));
132 if (shared_meta_->cookie != 0 ||
133 shared_meta_->size != 0 ||
134 shared_meta_->version != 0 ||
135 subtle::NoBarrier_Load(&shared_meta_->freeptr) != 0 ||
136 subtle::NoBarrier_Load(&shared_meta_->flags) != 0 ||
137 shared_meta_->tailptr != 0 ||
138 shared_meta_->queue.cookie != 0 ||
139 subtle::NoBarrier_Load(&shared_meta_->queue.next) != 0 ||
140 first_block->size != 0 ||
141 first_block->cookie != 0 ||
142 first_block->type_id != 0 ||
143 first_block->next != 0) {
144 // ...or something malicious has been playing with the metadata.
145 NOTREACHED();
146 SetCorrupted();
147 }
148
149 // This is still safe to do even if corruption has been detected.
150 shared_meta_->cookie = kGlobalCookie;
151 shared_meta_->size = size;
152 shared_meta_->page_size = page_size;
153 shared_meta_->version = kGlobalVersion;
154 subtle::NoBarrier_Store(&shared_meta_->freeptr, sizeof(SharedMetadata));
155
156 // Set up the queue of iterable allocations.
157 shared_meta_->queue.size = sizeof(BlockHeader);
158 shared_meta_->queue.cookie = kBlockCookieQueue;
159 subtle::NoBarrier_Store(&shared_meta_->queue.next, OFFSET_QUEUE);
160 subtle::NoBarrier_Store(&shared_meta_->tailptr, OFFSET_QUEUE);
161 } else {
162 // The allocator is attaching to a previously initialized segment of
163 // memory. Make sure the embedded data matches what has been passed.
164 if (shared_meta_->size != size || shared_meta_->page_size != page_size) {
165 NOTREACHED();
166 SetCorrupted();
167 }
168 }
169 }
170
171 SharedMemoryAllocator::~SharedMemoryAllocator() {}
172
173 int32_t SharedMemoryAllocator::Allocate(int32_t size, int32_t type_id) {
174 if (size < 0) {
Dmitry Vyukov 2015/11/04 13:52:29 check that size != 0 as well in GetNextIterable an
bcwhite 2015/11/04 17:18:55 Done.
175 NOTREACHED();
176 return OFFSET_NULL;
177 }
178
179 // Round up the requested size, plus header, to the next allocation alignment.
180 size += sizeof(BlockHeader);
Dmitry Vyukov 2015/11/04 13:52:29 check for overflow, renderer can pass INT_MAX-1 now
bcwhite 2015/11/04 17:18:55 Done.
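A sketch of the kind of guard being requested, rejecting sizes that would overflow once the header and worst-case alignment padding are added (names, values, and placement are assumptions, not the committed fix):

    #include <stdint.h>
    #include <limits>

    // Illustrative only: true if |size| plus the 16-byte header and up to
    // 7 bytes of alignment padding would overflow int32_t.
    bool RequestWouldOverflow(int32_t size) {
      const int32_t kMaxAdjustment = 16 + (8 - 1);
      return size > std::numeric_limits<int32_t>::max() - kMaxAdjustment;
    }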
181 size = (size + (kAllocAlignment - 1)) & ~(kAllocAlignment - 1);
182 if (size > mem_page_)
Dmitry Vyukov 2015/11/04 13:52:29 check that size <= page_size
183 return OFFSET_NULL;
184
185 // Allocation is lockless so we do all our calculation and then, if saving
186 // indicates a change has occurred since we started, scrap everything and
187 // start over.
188 for (;;) {
189 if (IsCorrupted())
190 return OFFSET_NULL;
191
192 int32_t freeptr = subtle::Acquire_Load(&shared_meta_->freeptr);
Dmitry Vyukov 2015/11/04 13:52:29 What do we acquire here? Where is the pairing release?
bcwhite 2015/11/04 17:18:55 It's the CAS on line 214 or 234 (only one is executed).
Dmitry Vyukov 2015/11/05 10:49:42 Acquire or release operation that is not paired wi
bcwhite 2015/11/05 14:37:15 I've been thinking about acquire/release in terms
Dmitry Vyukov 2015/11/05 16:38:12 Correct.
bcwhite 2015/11/05 17:06:30 Ahhh! So it's not that specific value we're acquiring.
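For readers following the thread above: an acquire load does not "acquire" a particular value, it pairs with a release store and makes everything written before that store visible. A small standalone illustration in portable C++ (std::atomic is used here only for clarity; the patch itself uses base/atomicops):

    #include <atomic>
    #include <cassert>

    std::atomic<int> freeptr{0};
    int payload = 0;  // Plain data published via the release/acquire pair.

    void Producer() {
      payload = 42;                                  // 1. write the data
      freeptr.store(16, std::memory_order_release);  // 2. publish it
    }

    void Consumer() {
      // Pairs with the release store above: if we observe 16, the earlier
      // write to |payload| is guaranteed to be visible as well.
      if (freeptr.load(std::memory_order_acquire) == 16)
        assert(payload == 42);
    }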
193 if (freeptr + size > mem_size_) {
194 SetFlag(&shared_meta_->flags, kFlagFull);
195 return OFFSET_NULL;
196 }
197
198 // Get pointer to the "free" block. It doesn't even have a header; pass
199 // -sizeof(header) so accounting for that will yield an expected size of
200 // zero which is what will be stored at that location. If something
201 // has been allocated since the load of freeptr above, it is still safe
202 // as nothing will be written to that location until after the CAS below.
203 BlockHeader* block = GetBlock(freeptr, 0, -(int)sizeof(BlockHeader), true);
204 if (!block) {
205 SetCorrupted();
206 return OFFSET_NULL;
207 }
208
209 // An allocation cannot cross page boundaries. If it would, create a
210 // "wasted" block and begin again at the top of the next page.
211 int32_t page_free = mem_page_ - freeptr % mem_page_;
212 if (size > page_free) {
Dmitry Vyukov 2015/11/04 13:52:29 %K returns value in [0, K), not [1, K]; check for page_free == 0
bcwhite 2015/11/04 17:18:55 I want [0, K). If "freeptr" points to the start of a page, page_free is the full mem_page_.
Dmitry Vyukov 2015/11/04 17:33:07 I may be missing something then. If we get page_free == 0 here, what happens?
bcwhite 2015/11/04 18:40:16 mem_page_ > 0, therefore freeptr % mem_page_ < mem_page_ and page_free is never 0.
Dmitry Vyukov 2015/11/05 10:49:42 Aha! I missed "mem_page_ - " part. Sorry.
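To make the arithmetic in the thread above concrete: with mem_page_ == 4096, freeptr == 4000 gives page_free == 96, while freeptr == 4096 (the start of a page) gives freeptr % mem_page_ == 0 and therefore page_free == 4096. So page_free always lies in (0, mem_page_] and is never zero.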
213 int32_t new_freeptr = freeptr + page_free;
214 if (subtle::Release_CompareAndSwap(
Dmitry Vyukov 2015/11/04 13:52:29 What do we release here? Where is the pairing acquire?
215 &shared_meta_->freeptr, freeptr, new_freeptr) == freeptr) {
216 block->size = page_free;
Dmitry Vyukov 2015/11/04 13:52:29 Why do we need this? We now don't iterate the region.
bcwhite 2015/11/04 17:18:55 I suppose it could be omitted now. It was part of
Dmitry Vyukov 2015/11/04 17:33:08 I don't object too much. But then add a comment. F
bcwhite 2015/11/04 18:40:16 Done.
217 block->cookie = kBlockCookieWasted;
218 }
219 continue;
220 }
221
222 // Don't leave a slice at the end of a page too small for anything. This
223 // can result in an allocation up to two alignment-sizes greater than the
224 // minimum required by requested-size + header + alignment.
225 if (page_free - size < (int)(sizeof(BlockHeader) + kAllocAlignment))
226 size = page_free;
227
228 int32_t new_freeptr = freeptr + size;
229 if (new_freeptr > mem_size_) {
230 SetCorrupted();
231 return OFFSET_NULL;
232 }
233
234 if (subtle::Release_CompareAndSwap(
Dmitry Vyukov 2015/11/04 13:52:29 What do we release here? Where is the pairing acquire?
235 &shared_meta_->freeptr, freeptr, new_freeptr) != freeptr) {
236 // Another thread must have completed an allocation while we were working.
237 // Try again.
238 continue;
239 }
240
241 // Given that all memory was zeroed before ever being given to an instance
242 // of this class and given that we only allocate in a monotonic fashion
243 // going forward, it must be that the newly allocated block is completely
244 // full of zeros. If we find anything in the block header that is NOT a
245 // zero then something must have previously run amuck through memory,
246 // writing beyond the allocated space and into unallocated space.
247 if (block->size != 0 ||
248 block->cookie != kBlockCookieFree ||
249 block->type_id != 0 ||
250 subtle::NoBarrier_Load(&block->next) != 0) {
251 SetCorrupted();
252 return OFFSET_NULL;
253 }
254
255 block->size = size;
Dmitry Vyukov 2015/11/04 13:52:29 These should be atomic stores as they race with al
bcwhite 2015/11/04 17:18:55 You mean the checks on lines 247-250?
Dmitry Vyukov 2015/11/04 17:33:08 I mean checks in GetBlock done by another thread while these fields are being written.
256 block->cookie = kBlockCookieAllocated;
257 block->type_id = type_id;
258 return freeptr;
259 }
260 }
261
262 void SharedMemoryAllocator::GetMemoryInfo(MemoryInfo* meminfo) {
263 int32_t remaining =
264 mem_size_ - subtle::NoBarrier_Load(&shared_meta_->freeptr);
265 meminfo->total = mem_size_;
266 meminfo->free = IsCorrupted() ? 0 : remaining - sizeof(BlockHeader);
267 }
268
269 void SharedMemoryAllocator::MakeIterable(int32_t offset) {
270 if (IsCorrupted())
271 return;
272 BlockHeader* block = GetBlock(offset, 0, 0, false);
273 if (!block) // invalid offset
274 return;
275 if (subtle::NoBarrier_Load(&block->next) != 0) // previously set iterable
276 return;
277 subtle::NoBarrier_Store(&block->next, OFFSET_QUEUE); // will be tail block
278
279 // Try to add this block to the tail of the queue. May take multiple tries.
280 int32_t tail;
281 for (;;) {
282 tail = subtle::Acquire_Load(&shared_meta_->tailptr);
283 block = GetBlock(tail, 0, 0, true);
284 if (!block) {
285 SetCorrupted();
286 return;
287 }
288 int32_t next = subtle::NoBarrier_Load(&block->next);
289
290 // Ensure that the tail pointer didn't change while reading next. Only
291 // the read of the tail pointer is atomic but we need to read both the
292 // tail pointer and the next pointer from it in an atomic fashion. The
293 // way to do this is to read both non-atomically and then verify after
294 // the second read that the first read is still valid/unchanged.
295 if (tail == subtle::Release_Load(&shared_meta_->tailptr)) {
Dmitry Vyukov 2015/11/04 13:52:29 Why do we need the atomic read of both fields? nex
bcwhite 2015/11/04 17:18:55 This is how it is done in the M&S Queue paper -- t
Dmitry Vyukov 2015/11/04 17:33:08 What will break if we remove the CAS?
bcwhite 2015/11/04 18:40:16 As I understand it... If we remove the block-next
Dmitry Vyukov 2015/11/05 10:49:42 We need to understand this algorithm well enough t
bcwhite 2015/11/05 14:37:15 Fair enough. Let's see what happens.
296 // Check if the found block is truly the last in the queue (i.e. it
297 // points back to the "queue" node).
298 if (next == OFFSET_QUEUE) {
299 // Yes. Try to append the passed block after the current tail block.
300 if (subtle::Release_CompareAndSwap(
301 &block->next, OFFSET_QUEUE, offset) == OFFSET_QUEUE) {
302 // Success! The block is enqueued; need to update the tail pointer.
303 break;
304 }
305 } else {
306 // No. Another thread has stopped between the block-next update
307 // and the tail-pointer update. Try to update tailptr past the
308 // found block. That other thread may complete it first or it
309 // may have crashed. Be fail-safe.
310 subtle::Release_CompareAndSwap(&shared_meta_->tailptr, tail, next);
311 }
312 }
313 }
314
315 // Block has been enqueued. Now update the tail-pointer past it. This
316 // could fail if another thread has already completed the operation as
317 // part of being fail-safe.
318 subtle::Release_CompareAndSwap(&shared_meta_->tailptr, tail, offset);
319 }
320
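For reference, the append step of the Michael & Scott queue that MakeIterable() follows, written against a toy node array with std::atomic (a sketch of the published algorithm, not this patch's code; offset 0 stands in for OFFSET_QUEUE):

    #include <atomic>
    #include <stdint.h>

    struct Node {
      std::atomic<int32_t> next{0};
    };

    Node nodes[1024];                  // nodes[0] is the sentinel head/tail.
    std::atomic<int32_t> tailptr{0};

    void Enqueue(int32_t new_offset) {
      nodes[new_offset].next.store(0, std::memory_order_relaxed);
      for (;;) {
        int32_t tail = tailptr.load(std::memory_order_acquire);
        int32_t next = nodes[tail].next.load(std::memory_order_acquire);
        if (tail != tailptr.load(std::memory_order_acquire))
          continue;  // tailptr moved while we were reading |next|; start over.
        if (next == 0) {
          // |tail| really is the last node: try to link the new node after it.
          if (nodes[tail].next.compare_exchange_weak(
                  next, new_offset, std::memory_order_release)) {
            // Enqueued; now advance tailptr (another thread may beat us to it).
            tailptr.compare_exchange_strong(tail, new_offset,
                                            std::memory_order_release);
            return;
          }
        } else {
          // Another enqueuer linked its node but hasn't advanced tailptr yet;
          // help it along, then retry.
          tailptr.compare_exchange_strong(tail, next,
                                          std::memory_order_release);
        }
      }
    }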
321 void SharedMemoryAllocator::CreateIterator(Iterator* state) {
322 state->last = OFFSET_QUEUE;
323 state->niter = 0;
324 }
325
326 int32_t SharedMemoryAllocator::GetNextIterable(Iterator* state,
327 int32_t* type_id) {
328 const BlockHeader* block = GetBlock(state->last, 0, 0, true);
329 if (!block) // invalid iterator state
330 return OFFSET_NULL;
331 int32_t next = subtle::NoBarrier_Load(&block->next);
Dmitry Vyukov 2015/11/04 13:52:28 this needs to be Acquire_Load, this is what acquire is for
bcwhite 2015/11/04 17:18:55 Whew! I've added a comment according to my understanding.
332 block = GetBlock(next, 0, 0, false);
333 if (!block) // no next allocation in queue
334 return OFFSET_NULL;
335
336 // Memory corruption could cause a loop in the list. We need to detect
337 // that so as to not cause an infinite loop in the caller. We do this
338 // simply by making sure we don't iterate more than the absolute maximum
339 // number of allocations that could have been made. Callers are likely
340 // to loop multiple times before it is detected but at least it stops.
341 int32_t freeptr = std::min(subtle::Acquire_Load(&shared_meta_->freeptr),
Dmitry Vyukov 2015/11/04 13:52:29 visibility over what do we acquire here?
bcwhite 2015/11/04 17:18:55 There must be something I don't understand about acquire/release.
Dmitry Vyukov 2015/11/05 10:49:42 Just atomic load is NoBarrier_Load. Acquire/Release additionally provide visibility/ordering guarantees when paired.
342 mem_size_);
343 if (state->niter > freeptr / (sizeof(BlockHeader) + kAllocAlignment)) {
344 SetCorrupted();
345 return OFFSET_NULL;
346 }
347
348 state->last = next;
349 state->niter++;
350 *type_id = block->type_id;
351
352 return next;
353 }
354
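A sketch of how a caller might walk the iterable list with this API (assuming the declarations in shared_memory_allocator.h; the use of GetBlockData() here is illustrative, not prescribed by this patch):

    #include <stdint.h>
    #include "base/memory/shared_memory_allocator.h"

    // Illustrative only: enumerate every block that has been made iterable.
    void DumpIterables(base::SharedMemoryAllocator* allocator) {
      base::SharedMemoryAllocator::Iterator iter;
      allocator->CreateIterator(&iter);
      int32_t type_id = 0;
      for (int32_t offset = allocator->GetNextIterable(&iter, &type_id);
           offset != 0;  // OFFSET_NULL
           offset = allocator->GetNextIterable(&iter, &type_id)) {
        // Callers supply the expected type/size when resolving the offset.
        void* data = allocator->GetBlockData(offset, type_id, 1, false);
        if (data) {
          // ... interpret |data| according to |type_id| ...
        }
      }
    }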
355 // The "corrupted" state is held both locally and globally (shared). The
356 // shared flag can't be trusted since a malicious actor could overwrite it.
357 // The local version is immune to foreign actors. Thus, if the shared flag
358 // is seen set, copy it locally and, once set locally, always restore it globally.
359 void SharedMemoryAllocator::SetCorrupted() {
360 LOG(ERROR) << "Corruption detected in shared-memory segment.";
361 subtle::NoBarrier_Store(&corrupted_, 1);
362 SetFlag(&shared_meta_->flags, kFlagCorrupted);
363 }
364
365 bool SharedMemoryAllocator::IsCorrupted() {
366 if (subtle::NoBarrier_Load(&corrupted_) ||
367 CheckFlag(&shared_meta_->flags, kFlagCorrupted)) {
368 SetCorrupted(); // Make sure all indicators are set.
369 return true;
370 }
371 return false;
372 }
373
374 bool SharedMemoryAllocator::IsFull() {
375 return CheckFlag(&shared_meta_->flags, kFlagFull);
376 }
377
378 // Dereference a block |offset| and ensure that it's valid for the desired
379 // |type_id| and |size|. |special| indicates that we may try to access block
380 // headers not available to callers but still accessed by this module. By
381 // having internal dereferences go through this same function, the allocator
382 // is hardened against corruption.
383 SharedMemoryAllocator::BlockHeader* SharedMemoryAllocator::GetBlock(
384 int32_t offset,
385 int32_t type_id,
386 int32_t size,
387 bool special) {
Dmitry Vyukov 2015/11/04 13:52:29 Split special flag into two flags: one allows to get the queue block, another allows to get not-yet-allocated blocks.
bcwhite 2015/11/04 17:18:55 Done.
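A sketch of the split the reviewer suggests, with one capability per caller intent (parameter names here are assumptions; a later patch set settles the real ones):

    // Hypothetical signature:
    //   GetBlock(int32_t offset, int32_t type_id, int32_t size,
    //            bool queue_ok,   // may dereference the internal queue block
    //            bool free_ok);   // may dereference not-yet-allocated space
    //
    // The two checks that currently key off |special| would then become:
    //   if (offset < (queue_ok ? OFFSET_QUEUE : (int)sizeof(SharedMetadata)))
    //     return nullptr;
    //   ...
    //   if (!free_ok && block->cookie != kBlockCookieAllocated)
    //     return nullptr;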
388 // Validation of parameters.
389 if (offset % kAllocAlignment != 0)
390 return nullptr;
391 if (offset < (int)(special ? OFFSET_QUEUE : sizeof(SharedMetadata)))
392 return nullptr;
393 size += sizeof(BlockHeader);
394 if (offset + size > mem_size_)
395 return nullptr;
396 int32_t freeptr = subtle::NoBarrier_Load(&shared_meta_->freeptr);
397 if (offset + size > freeptr)
398 return nullptr;
399
400 // Validation of referenced block-header.
401 const BlockHeader* block = reinterpret_cast<BlockHeader*>(mem_base_ + offset);
402 if (block->size < size)
403 return nullptr;
404 if (!special && block->cookie != kBlockCookieAllocated)
405 return nullptr;
406 if (type_id != 0 && block->type_id != type_id)
407 return nullptr;
408
409 // Return pointer to block data.
410 return reinterpret_cast<BlockHeader*>(mem_base_ + offset);
411 }
412
413 void* SharedMemoryAllocator::GetBlockData(int32_t offset,
414 int32_t type_id,
415 int32_t size,
416 bool special) {
417 DCHECK(size > 0);
418 BlockHeader* block = GetBlock(offset, type_id, size, special);
419 if (!block)
420 return nullptr;
421 return reinterpret_cast<char*>(block) + sizeof(BlockHeader);
422 }
423
424 } // namespace base