OLD | NEW |
---|---|
1 // Copyright (c) 2015 The Chromium Authors. All rights reserved. | |
2 // Use of this source code is governed by a BSD-style license that can be | |
3 // found in the LICENSE file. | |
4 | |
5 #include "base/memory/shared_memory_allocator.h" | |
6 | |
7 #include <assert.h> | |
8 #include <algorithm> | |
9 | |
10 #include "base/logging.h" | |
11 | |
12 // All integer constants in this file are signed because Atomic32 is signed | |
13 // and keeping everything else consistent with it avoids a lot of unnecessary | |
14 // casting between signed and unsigned values just to satisfy the compiler. | |
15 // This means an occasional cast of a constant from sizeof() to "int" but | |
16 // is far simpler than the alternative. | |
mdempsky
2015/11/10 19:47:55
I notice that 1) there's no checks to ensure the s
bcwhite
2015/11/10 21:17:34
There are size_t checks where it's used (which is
mdempsky
2015/11/10 22:45:52
Style guide says "Do not use C-style casts."
| |
17 | |
18 namespace { | |
19 | |
20 // Required range of memory segment sizes. It has to fit in a signed 32-bit | |
21 // number and should be a power of 2 in order to accommodate almost any page | |
22 // size. | |
23 const int32_t kSegmentMinSize = 1 << 10; // 1 KiB | |
24 const int32_t kSegmentMaxSize = 1 << 30; // 1 GiB | |
25 | |
26 // All allocations and data-structures must be aligned to this byte boundary. | |
27 // Alignment as large as the physical bus between CPU and RAM is _required_ | |
28 // for some architectures, is simply more efficient on other CPUs, and | |
29 // generally a Good Idea(tm) for all platforms as it reduces/eliminates the | |
30 // chance that a type will span cache lines. Alignment mustn't be less | |
31 // than 8 to ensure proper alignment for all types. The rest is a balance | |
32 // between reducing spans across multiple cache lines and wasted space spent | |
33 // padding out allocations. An alignment of 16 would ensure that the block | |
34 // header structure always sits in a single cache line. An average of about | |
35 // 1/2 this value will be wasted with every allocation. | |
36 const int32_t kAllocAlignment = 8; | |
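As a concrete illustration of what this alignment implies (a minimal sketch, not part of the CL, with an invented helper name), the round-up uses the same bit trick that Allocate() applies further down:

    // Round |size| up to the next multiple of kAllocAlignment (a power of 2).
    // With an alignment of 8: 13 -> 16, 16 -> 16, 17 -> 24.
    int32_t RoundUpToAlignment(int32_t size) {
      return (size + (kAllocAlignment - 1)) & ~(kAllocAlignment - 1);
    }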
37 | |
38 // A constant (random) value placed in the shared metadata to identify | |
39 // an already initialized memory segment. | |
40 const int32_t kGlobalCookie = 0x408305DC; | |
41 | |
42 // The current version of the metadata. If updates are made that change | |
43 // the metadata, the version number can be queried to operate in a backward- | |
44 // compatible manner until the memory segment is completely re-initialized. | |
45 const int32_t kGlobalVersion = 1; | |
46 | |
47 // Constant values placed in the block headers to indicate its state. | |
48 const int32_t kBlockCookieFree = 0; | |
49 const int32_t kBlockCookieQueue = 1; | |
50 const int32_t kBlockCookieWasted = -1; | |
51 const int32_t kBlockCookieAllocated = 0xC8799269; | |
52 | |
53 // TODO(bcwhite): When acceptable, consider moving flags to std::atomic<char> | |
54 // types rather than combined bitfield. | |
55 | |
56 // Flags stored in the flags_ field of the SharedMetaData structure below. | |
57 enum : int32_t { | |
58 kFlagCorrupt = 1 << 0, | |
59 kFlagFull = 1 << 1 | |
60 }; | |
61 | |
62 bool CheckFlag(base::subtle::Atomic32* flags, int flag) { | |
63 base::subtle::Atomic32 loaded_flags = base::subtle::Acquire_Load(flags); | |
64 return (loaded_flags & flag) != 0; | |
65 } | |
66 | |
67 void SetFlag(base::subtle::Atomic32* flags, int flag) { | |
68 for (;;) { | |
69 base::subtle::Atomic32 loaded_flags = base::subtle::Acquire_Load(flags); | |
70 base::subtle::Atomic32 new_flags = | |
71 (loaded_flags & ~flag) | flag; | |
72 if (base::subtle::Release_CompareAndSwap( | |
73 flags, loaded_flags, new_flags) == loaded_flags) { | |
74 break; | |
75 } | |
76 } | |
77 } | |
78 | |
79 } // namespace | |
80 | |
81 namespace base { | |
82 | |
83 // The block-header is placed at the top of every allocation within the | |
84 // segment to describe the data that follows it. | |
85 struct SharedMemoryAllocator::BlockHeader { | |
86 int32_t size; // Number of bytes in this block, including header. | |
87 int32_t cookie; // Constant value indicating completed allocation. | |
88 uint32_t type_id; // A number provided by caller indicating data type. | |
89 subtle::Atomic32 next; // Pointer to the next block when iterating. | |
90 }; | |
91 | |
92 // The shared metadata exists once at the top of the memory segment to | |
93 // describe the state of the allocator to all processes. | |
94 struct SharedMemoryAllocator::SharedMetadata { | |
95 int32_t cookie; // Some value that indicates complete initialization. | |
96 int32_t size; // Total size of memory segment. | |
97 int32_t page_size; // Paging size within memory segment. | |
98 int32_t version; // Version code so upgrades don't break. | |
99 subtle::Atomic32 freeptr; // Offset/ref to first free space in the segment. | |
100 subtle::Atomic32 flags; // Bitfield of information flags. | |
101 int32_t reserved; // Padding to ensure size is multiple of alignment. | |
102 | |
103 // The "iterable" queue is an M&S Queue as described here, append-only: | |
104 // https://www.research.ibm.com/people/m/michael/podc-1996.pdf | |
105 subtle::Atomic32 tailptr; // Last block available for iteration. | |
106 BlockHeader queue; // Empty block for linked-list head/tail. (must be last) | |
107 }; | |
108 | |
109 // The "queue" block header is used to detect "last node" so that zero/null | |
110 // can be used to indicate that it hasn't been added at all. It is part of | |
111 // the SharedMetadata structure which itself is always located at offset zero. | |
112 // This can't be a constant because SharedMetadata is a private definition. | |
mdempsky
2015/11/10 19:47:55
FWIW, this is valid C++:
class Foo {
struct
bcwhite
2015/11/10 21:17:34
That would require the constant to be declared in
mdempsky
2015/11/10 22:45:52
Declared, yes. Is that problematic somehow?
bcwhite
2015/11/11 13:25:14
Nope. Just making sure I understood.
| |
113 #define REF_QUEUE offsetof(SharedMetadata, queue) | |
114 #define REF_NULL 0 // the equivalest NULL value for a reference | |
mdempsky
2015/11/10 19:47:55
typo: equivalent
bcwhite
2015/11/10 21:17:34
Done.
| |
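Put another way, a Reference here is just a byte offset from mem_base_: REF_NULL (0) plays the role of nullptr, REF_QUEUE is the offset of the queue block embedded in SharedMetadata (which always sits at offset zero), and GetBlock() below recovers a pointer with reinterpret_cast<BlockHeader*>(mem_base_ + ref).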
115 | |
116 SharedMemoryAllocator::SharedMemoryAllocator(void* base, | |
117 size_t size, | |
118 size_t page_size) | |
119 : mem_base_(static_cast<char*>(base)), | |
120 mem_size_((int32_t)size), | |
121 mem_page_((int32_t)(page_size ? page_size : size)), | |
122 corrupted_(0) { | |
123 static_assert(sizeof(BlockHeader) % kAllocAlignment == 0, | |
124 "BlockHeader is not a multiple of kAllocAlignment"); | |
125 static_assert(sizeof(SharedMetadata) % kAllocAlignment == 0, | |
126 "SharedMetadata is not a multiple of kAllocAlignment"); | |
127 | |
128 CHECK(base && reinterpret_cast<uintptr_t>(base) % kAllocAlignment == 0); | |
129 CHECK(size >= kSegmentMinSize && size <= kSegmentMaxSize && | |
130 size % kAllocAlignment == 0); | |
131 CHECK(page_size == 0 || size % page_size == 0); | |
132 | |
133 if (shared_meta()->cookie != kGlobalCookie) { | |
134 // This block is only executed when a completely new memory segment is | |
135 // being initialized. It's unshared and single-threaded... | |
136 const BlockHeader* first_block = reinterpret_cast<BlockHeader*>( | |
137 mem_base_ + sizeof(SharedMetadata)); | |
138 if (shared_meta()->cookie != 0 || | |
139 shared_meta()->size != 0 || | |
140 shared_meta()->version != 0 || | |
141 subtle::NoBarrier_Load(&shared_meta()->freeptr) != 0 || | |
142 subtle::NoBarrier_Load(&shared_meta()->flags) != 0 || | |
143 shared_meta()->tailptr != 0 || | |
144 shared_meta()->queue.cookie != 0 || | |
145 subtle::NoBarrier_Load(&shared_meta()->queue.next) != 0 || | |
146 first_block->size != 0 || | |
147 first_block->cookie != 0 || | |
148 first_block->type_id != 0 || | |
149 first_block->next != 0) { | |
150 // ...or something malicious has been playing with the metadata. | |
151 NOTREACHED(); | |
152 SetCorrupt(); | |
153 } | |
154 | |
155 // This is still safe to do even if corruption has been detected. | |
156 shared_meta()->cookie = kGlobalCookie; | |
157 shared_meta()->size = mem_size_; | |
158 shared_meta()->page_size = mem_page_; | |
159 shared_meta()->version = kGlobalVersion; | |
160 subtle::NoBarrier_Store(&shared_meta()->freeptr, sizeof(SharedMetadata)); | |
161 | |
162 // Set up the queue of iterable allocations. | |
163 shared_meta()->queue.size = sizeof(BlockHeader); | |
164 shared_meta()->queue.cookie = kBlockCookieQueue; | |
165 subtle::NoBarrier_Store(&shared_meta()->queue.next, REF_QUEUE); | |
166 subtle::NoBarrier_Store(&shared_meta()->tailptr, REF_QUEUE); | |
167 } else { | |
168 // The allocator is attaching to a previously initialized segment of | |
169 // memory. Make sure the embedded data matches what has been passed. | |
170 if (shared_meta()->size != mem_size_ || | |
171 shared_meta()->page_size != mem_page_) { | |
172 NOTREACHED(); | |
173 SetCorrupt(); | |
174 } | |
175 } | |
176 } | |
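A minimal usage sketch for this constructor (names and sizes are assumptions for illustration, not part of the CL); the region must already be zero-filled, aligned, and sized as the CHECKs above require:

    // Hypothetical example: attach an allocator to a freshly zeroed 1 MiB
    // mapping obtained elsewhere (e.g. from a shared-memory segment), using
    // 4 KiB internal pages; 1 MiB is a multiple of 4 KiB, as required.
    //   base::SharedMemoryAllocator allocator(mapping, 1 << 20, 4096);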
177 | |
178 SharedMemoryAllocator::~SharedMemoryAllocator() {} | |
179 | |
180 size_t SharedMemoryAllocator::GetAllocSize(Reference ref) { | |
181 BlockHeader* block = GetBlock(ref, 0, 0, false, false); | |
182 if (!block) | |
183 return 0; | |
184 int32_t size = block->size; | |
185 // Header was verified by GetBlock() but a malicious actor could change | |
186 // the value between there and here. Check it again. | |
187 if (size <= (int)sizeof(BlockHeader) || ref + size >= mem_size_) | |
188 return 0; | |
189 return (size_t)size - sizeof(BlockHeader); | |
190 } | |
191 | |
192 int32_t SharedMemoryAllocator::Allocate(size_t usize, uint32_t type_id) { | |
193 // Round up the requested size, plus header, to the next allocation alignment. | |
194 int32_t size = (int)usize + sizeof(BlockHeader); | |
195 size = (size + (kAllocAlignment - 1)) & ~(kAllocAlignment - 1); | |
196 if (usize > (size_t)std::numeric_limits<int32_t>::max() || | |
197 size <= (int)sizeof(BlockHeader) || size > mem_page_) { | |
198 NOTREACHED(); | |
199 return REF_NULL; | |
200 } | |
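For concreteness (numbers assumed for illustration): with an 8-byte kAllocAlignment and a 16-byte BlockHeader, a request of usize == 13 becomes 13 + 16 = 29 bytes and rounds up to size == 32; any rounded size (header included) larger than mem_page_ is rejected by the check above.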
201 | |
202 // Allocation is lockless so we do all our calculation and then, if saving | |
203 // indicates a change has occurred since we started, scrap everything and | |
204 // start over. | |
205 for (;;) { | |
206 if (IsCorrupt()) | |
207 return REF_NULL; | |
208 | |
209 // Get the current start of unallocated memory. Other threads may | |
210 // update this at any time and cause us to retry these operations. | |
211 int32_t freeptr = subtle::NoBarrier_Load(&shared_meta()->freeptr); | |
212 if (freeptr + size > mem_size_) { | |
213 SetFlag(&shared_meta()->flags, kFlagFull); | |
214 return REF_NULL; | |
215 } | |
216 | |
217 // Get pointer to the "free" block. It doesn't even have a header; pass | |
218 // -sizeof(header) so accounting for that will yield an expected size of | |
219 // zero which is what will be stored at that location. If something | |
220 // has been allocated since the load of freeptr above, it is still safe | |
221 // as nothing will be written to that location until after the CAS below. | |
222 BlockHeader* block = GetBlock(freeptr, 0, 0, false, true); | |
223 if (!block) { | |
224 SetCorrupt(); | |
225 return REF_NULL; | |
226 } | |
227 | |
228 // An allocation cannot cross page boundaries. If it would, create a | |
229 // "wasted" block and begin again at the top of the next page. This | |
230 // area could just be left empty but we fill in the block header just | |
231 // for completeness' sake. | |
232 int32_t page_free = mem_page_ - freeptr % mem_page_; | |
233 if (size > page_free) { | |
234 if (page_free <= sizeof(BlockHeader)) { | |
235 SetCorrupt(); | |
236 return REF_NULL; | |
237 } | |
238 int32_t new_freeptr = freeptr + page_free; | |
239 if (subtle::NoBarrier_CompareAndSwap( | |
240 &shared_meta()->freeptr, freeptr, new_freeptr) == freeptr) { | |
241 block->size = page_free; | |
242 block->cookie = kBlockCookieWasted; | |
243 } | |
244 continue; | |
245 } | |
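A worked example of this path (numbers assumed for illustration): with mem_page_ == 4096 and freeptr == 4000, page_free is 96; a rounded request of 128 bytes does not fit, so a 96-byte block marked kBlockCookieWasted is written at offset 4000, freeptr advances to 4096, and the loop retries from the top of the next page.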
246 | |
247 // Don't leave a slice at the end of a page too small for anything. This | |
248 // can result in an allocation up to two alignment-sizes greater than the | |
249 // minimum required by requested-size + header + alignment. | |
250 if (page_free - size < (int)(sizeof(BlockHeader) + kAllocAlignment)) | |
251 size = page_free; | |
252 | |
253 int32_t new_freeptr = freeptr + size; | |
254 if (new_freeptr > mem_size_) { | |
255 SetCorrupt(); | |
256 return REF_NULL; | |
257 } | |
258 | |
259 if (subtle::NoBarrier_CompareAndSwap( | |
260 &shared_meta()->freeptr, freeptr, new_freeptr) != freeptr) { | |
261 // Another thread must have completed an allocation while we were working. | |
262 // Try again. | |
263 continue; | |
264 } | |
265 | |
266 // Given that all memory was zeroed before ever being given to an instance | |
267 // of this class and given that we only allocate in a monotonic fashion | |
268 // going forward, it must be that the newly allocated block is completely | |
269 // full of zeros. If we find anything in the block header that is NOT a | |
270 // zero then something must have previously run amuck through memory, | |
271 // writing beyond the allocated space and into unallocated space. | |
272 if (block->size != 0 || | |
273 block->cookie != kBlockCookieFree || | |
274 block->type_id != 0 || | |
275 subtle::NoBarrier_Load(&block->next) != 0) { | |
276 SetCorrupt(); | |
277 return REF_NULL; | |
278 } | |
279 | |
280 block->size = size; | |
281 block->cookie = kBlockCookieAllocated; | |
282 block->type_id = type_id; | |
283 return freeptr; | |
284 } | |
285 } | |
286 | |
287 void SharedMemoryAllocator::GetMemoryInfo(MemoryInfo* meminfo) { | |
288 int32_t remaining = | |
289 mem_size_ - subtle::NoBarrier_Load(&shared_meta()->freeptr); | |
290 meminfo->total = mem_size_; | |
291 meminfo->free = IsCorrupt() ? 0 : remaining - sizeof(BlockHeader); | |
292 } | |
293 | |
294 void SharedMemoryAllocator::MakeIterable(Reference ref) { | |
295 if (IsCorrupt()) | |
296 return; | |
297 BlockHeader* block = GetBlock(ref, 0, 0, false, false); | |
298 if (!block) // invalid reference | |
299 return; | |
300 if (subtle::Acquire_Load(&block->next) != 0) // previously set iterable | |
301 return; | |
302 subtle::Release_Store(&block->next, REF_QUEUE); // will be tail block | |
303 | |
304 // Try to add this block to the tail of the queue. May take multiple tries. | |
305 int32_t tail; | |
306 for (;;) { | |
307 // Acquire the current tail-pointer released by a previous call to this | |
308 // method and validate it. | |
309 tail = subtle::Acquire_Load(&shared_meta()->tailptr); | |
310 block = GetBlock(tail, 0, 0, true, false); | |
311 if (!block) { | |
312 SetCorrupt(); | |
313 return; | |
314 } | |
315 | |
316 // Try to insert the block at the tail of the queue. The tail node always | |
317 // has an existing value of REF_QUEUE; if that is not the value returned, | |
318 // another thread has acted in the meantime. | |
319 int32_t next = subtle::Release_CompareAndSwap(&block->next, REF_QUEUE, ref); | |
320 if (next == REF_QUEUE) { | |
321 // Update the tail pointer to the new offset. If the "else" clause did | |
322 // not exist, then this could be a simple Release_Store to set the new | |
323 // value but because it does, it's possible that other threads could add | |
324 // one or more nodes at the tail before reaching this point. We don't | |
325 // have to check the return value because it either operates correctly | |
326 // or the exact same operation has already been done (by the "else" | |
327 // clause). | |
328 subtle::Release_CompareAndSwap(&shared_meta()->tailptr, tail, ref); | |
329 return; | |
330 } else { | |
331 // In the unlikely case that a thread crashed or was killed between the | |
332 // update of "next" and the update of "tailptr", it is necessary to | |
333 // perform the operation that would have been done. There's no explicit | |
334 // check for crash/kill which means that this operation may also happen | |
335 // even when the other thread is in perfect working order which is what | |
336 // necessitates the CompareAndSwap above. | |
337 subtle::Release_CompareAndSwap(&shared_meta()->tailptr, tail, next); | |
338 } | |
339 } | |
340 } | |
341 | |
342 void SharedMemoryAllocator::CreateIterator(Iterator* state) { | |
343 state->last = REF_QUEUE; | |
344 state->niter = 0; | |
345 } | |
346 | |
347 int32_t SharedMemoryAllocator::GetNextIterable(Iterator* state, | |
348 uint32_t* type_id) { | |
349 const BlockHeader* block = GetBlock(state->last, 0, 0, true, false); | |
350 if (!block) // invalid iterator state | |
351 return REF_NULL; | |
352 | |
353 // The compiler and CPU can freely reorder all memory accesses on which | |
354 // there are no dependencies. It could, for example, move the load of | |
355 // "freeptr" above this point because there are no explicit dependencies | |
356 // between it and "next". If it did, however, then another block could | |
357 // be queued after that but before the following load meaning there is | |
358 // one more queued block than the future "detect loop by having more | |
359 // blocks that could fit before freeptr" will allow. | |
360 // | |
361 // By "acquiring" the "next" value here, it's synchronized to the enqueue | |
362 // of the node which in turn is synchronized to the allocation (which sets | |
363 // freeptr). Thus, the scenario above cannot happen. | |
364 int32_t next = subtle::Acquire_Load(&block->next); | |
365 block = GetBlock(next, 0, 0, false, false); | |
366 if (!block) // no next allocation in queue | |
367 return REF_NULL; | |
368 | |
369 // Memory corruption could cause a loop in the list. We need to detect | |
370 // that so as to not cause an infinite loop in the caller. We do this | |
371 // simply by making sure we don't iterate more than the absolute maximum | |
372 // number of allocations that could have been made. Callers are likely | |
373 // to loop multiple times before it is detected but at least it stops. | |
374 int32_t freeptr = std::min(subtle::Acquire_Load(&shared_meta()->freeptr), | |
375 mem_size_); | |
376 if (state->niter > freeptr / (sizeof(BlockHeader) + kAllocAlignment)) { | |
377 SetCorrupt(); | |
378 return REF_NULL; | |
379 } | |
380 | |
381 state->last = next; | |
382 state->niter++; | |
383 *type_id = block->type_id; | |
384 | |
385 return next; | |
386 } | |
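A sketch of the intended allocate-then-iterate round trip, assuming an allocator constructed as in the earlier sketch (the type id 42 and the sizes are invented for illustration):

    // Producer side: allocate, fill in the payload, then publish it.
    //   int32_t ref = allocator.Allocate(24, /*type_id=*/42);
    //   if (ref != 0 /* REF_NULL */) {
    //     // ... write up to 24 bytes of payload for |ref| ...
    //     allocator.MakeIterable(ref);
    //   }
    //
    // Consumer side: walk everything that has been made iterable so far.
    //   SharedMemoryAllocator::Iterator iter;
    //   allocator.CreateIterator(&iter);
    //   uint32_t type_id;
    //   while (int32_t r = allocator.GetNextIterable(&iter, &type_id)) {
    //     // |r| references an allocation whose type is |type_id|.
    //   }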
387 | |
388 // The "corrupted" state is held both locally and globally (shared). The | |
389 // shared flag can't be trusted since a malicious actor could overwrite it. | |
390 // The local version is immune to foreign actors. Thus, if seen shared, | |
391 // copy it locally and, once known, always restore it globally. | |
392 void SharedMemoryAllocator::SetCorrupt() { | |
393 LOG(ERROR) << "Corruption detected in shared-memory segment."; | |
394 subtle::NoBarrier_Store(&corrupted_, 1); | |
mdempsky
2015/11/10 19:47:55
(I take individual SharedMemoryAllocator objects m
bcwhite
2015/11/10 21:17:34
Yes.
| |
395 SetFlag(&shared_meta()->flags, kFlagCorrupt); | |
396 } | |
397 | |
398 bool SharedMemoryAllocator::IsCorrupt() { | |
399 if (subtle::NoBarrier_Load(&corrupted_) || | |
400 CheckFlag(&shared_meta()->flags, kFlagCorrupt)) { | |
401 SetCorrupt(); // Make sure all indicators are set. | |
402 return true; | |
403 } | |
404 return false; | |
405 } | |
406 | |
407 bool SharedMemoryAllocator::IsFull() { | |
408 return CheckFlag(&shared_meta()->flags, kFlagFull); | |
409 } | |
410 | |
411 // Dereference a block |ref| and ensure that it's valid for the desired | |
412 // |type_id| and |size|. |special| indicates that we may try to access block | |
413 // headers not available to callers but still accessed by this module. By | |
414 // having internal dereferences go through this same function, the allocator | |
415 // is hardened against corruption. | |
416 SharedMemoryAllocator::BlockHeader* SharedMemoryAllocator::GetBlock( | |
417 Reference ref, | |
418 uint32_t type_id, | |
419 int32_t size, | |
420 bool queue_ok, | |
421 bool free_ok) { | |
422 // Validation of parameters. | |
423 if (ref % kAllocAlignment != 0) | |
424 return nullptr; | |
425 if (ref < (int)(queue_ok ? REF_QUEUE : sizeof(SharedMetadata))) | |
426 return nullptr; | |
427 size += sizeof(BlockHeader); | |
428 if (ref + size > mem_size_) | |
429 return nullptr; | |
430 | |
431 // Validation of referenced block-header. | |
432 if (!free_ok) { | |
433 int32_t freeptr = subtle::NoBarrier_Load(&shared_meta()->freeptr); | |
434 if (ref + size > freeptr) | |
435 return nullptr; | |
436 const BlockHeader* block = | |
437 reinterpret_cast<BlockHeader*>(mem_base_ + ref); | |
438 if (block->size < size) | |
mdempsky
2015/11/10 19:47:55
Like JF and I mentioned, for C++ correctness, bloc
Alexander Potapenko
2015/11/10 20:47:10
By "volatile" did you mean "atomic"?
(this is basi
bcwhite
2015/11/10 21:17:35
You mean "volatile T* GetAsObject(...) {...}"? I
mdempsky
2015/11/10 22:45:52
JF said they need to both volatile and atomic (and
mdempsky
2015/11/10 22:45:52
Yes.
| |
439 return nullptr; | |
440 if (ref != REF_QUEUE && block->cookie != kBlockCookieAllocated) | |
441 return nullptr; | |
442 if (type_id != 0 && block->type_id != type_id) | |
443 return nullptr; | |
444 } | |
445 | |
446 // Return pointer to block data. | |
447 return reinterpret_cast<BlockHeader*>(mem_base_ + ref); | |
448 } | |
449 | |
450 void* SharedMemoryAllocator::GetBlockData(Reference ref, | |
451 uint32_t type_id, | |
452 int32_t size) { | |
453 DCHECK(size > 0); | |
454 BlockHeader* block = GetBlock(ref, type_id, size, false, false); | |
455 if (!block) | |
456 return nullptr; | |
457 return reinterpret_cast<char*>(block) + sizeof(BlockHeader); | |
458 } | |
459 | |
460 } // namespace base | |