1 // Copyright (c) 2015 The Chromium Authors. All rights reserved. | |
2 // Use of this source code is governed by a BSD-style license that can be | |
3 // found in the LICENSE file. | |
4 | |
5 #include "base/memory/shared_memory_allocator.h" | |
6 | |
7 #include <assert.h> | |
8 | |
9 #include "base/atomicops.h" | |
10 #include "base/logging.h" | |
11 | |
12 // All integer constants in this file are signed because Atomic32 is signed | |
13 // and keeping everything else consistent with that avoids a lot of casts | |
14 // that would otherwise be needed just to silence signed/unsigned warnings. | |
15 // This means an occasional cast of a sizeof() constant to "int", but that | |
16 // is far simpler than the alternative. | |
17 | |
18 namespace { | |
19 | |
20 // All allocations and data-structures must be aligned to this byte boundary. | |
21 // It shouldn't be less than 8 so that 64-bit values can be read in a single | |
22 // RAM bus access. 16 can be used so that the block header would always fall | |
23 // within a single cache line. | |
24 const int32_t kAllocAlignment = 8; | |
25 | |
26 // A constant (random) value placed in the shared metadata to identify | |
27 // an already initialized memory segment. | |
28 const int32_t kGlobalCookie = 0x408305DC; | |
29 | |
30 // The current version of the metadata. If updates are made that change | |
31 // the metadata, the version number can be queried to operate in a backward- | |
32 // compatible manner until the memory segment is completely re-initialized. | |
33 const int32_t kGlobalVersion = 1; | |
34 | |
35 // Constant values placed in the block headers to indicate their state. | |
36 const int32_t kBlockCookieFree = 0; | |
37 const int32_t kBlockCookieQueue = 1; | |
38 const int32_t kBlockCookieWasted = -1; | |
39 const int32_t kBlockCookieAllocated = 0xC8799269; | |
40 | |
41 } // namespace | |
42 | |
43 namespace base { | |
44 | |
45 // The block-header is placed at the top of every allocation within the | |
46 // segment to describe the data that follows it. | |
47 struct SharedMemoryAllocator::BlockHeader { | |
48 int32_t size; // Number of bytes in this block, including header. | |
49 int32_t cookie; // Constant value indicating completed allocation. | |
50 int32_t type; // A number provided by caller indicating data type. | |
51 subtle::Atomic32 next; // Pointer to the next block when iterating | |
52 }; | |
53 | |
54 // The shared metadata exists once at the top of the memory segment to | |
55 // describe the state of the allocator to all processes. | |
56 struct SharedMemoryAllocator::SharedMetadata { | |
57 int32_t cookie; // Some value that indicates complete initialization. | |
58 int32_t size; // Total size of memory segment. | |
59 int32_t page_size; // Paging size within memory segment. | |
60 int32_t version; // Version code so upgrades don't break. | |
61 subtle::Atomic32 freeptr; // Offset to first free space in the segment. | |
62 char corrupted; // Flag indicating that corruption has been detected. | |
63 char full; // Flag indicating alloc failed because segment is full. | |
64 char flags[2]; // Future flags. (exact padding to int boundary) | |
65 int32_t reserved; // Padding to ensure size is multiple of alignment. | |
66 | |
67 // The "iterable" queue is an M&S Queue as described here, append-only: | |
68 // https://www.research.ibm.com/people/m/michael/podc-1996.pdf | |
69 subtle::Atomic32 tailptr; // Last block available for iteration. | |
70 BlockHeader queue; // Empty block for linked-list head/tail. (must be last) | |
71 }; | |
72 | |
73 // The "queue" block header is used to detect "last node" so that zero/null | |
74 // can be used to indicate that it hasn't been added at all. It is part of | |
75 // the SharedMetadata structure which itself is always located at offset zero. | |
76 // This can't be a constant because SharedMetadata is a private definition. | |
77 #define OFFSET_QUEUE offsetof(SharedMetadata, queue) | |
78 #define OFFSET_NULL 0 // the equivalent of NULL for an offset | |
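For orientation, a rough sketch of the segment layout implied by the structures above (illustrative only; exact offsets depend on the real struct sizes):

  //   [0 ............................. )  SharedMetadata, which ends with the
  //                                        "queue" sentinel BlockHeader at
  //                                        OFFSET_QUEUE
  //   [sizeof(SharedMetadata) ........ )  first BlockHeader followed by data
  //   ...                                 further blocks, up to freeptr
  //   [freeptr ................... size)  not yet allocated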
79 | |
80 SharedMemoryAllocator::SharedMemoryAllocator(void* base, int32_t size, | |
81 int32_t page_size) | |
82 : shared_meta_(static_cast<SharedMetadata*>(base)), | |
83 mem_base_(static_cast<char*>(base)), | |
84 mem_size_(size), | |
85 mem_page_(page_size ? page_size : size), | |
86 last_seen_(0), | |
87 corrupted_(false) { | |
88 static_assert(sizeof(BlockHeader) % kAllocAlignment == 0, | |
89 "BlockHeader is not a multiple of kAllocAlignment"); | |
90 static_assert(sizeof(SharedMetadata) % kAllocAlignment == 0, | |
91 "SharedMetadata is not a multiple of kAllocAlignment"); | |
92 | |
93 DCHECK(base && reinterpret_cast<uintptr_t>(base) % kAllocAlignment == 0); | |
94 DCHECK(size >= 1 << 10 && size <= 1 << 20 && // 1 KiB <= size <= 1 MiB | |
95 size % kAllocAlignment == 0); | |
96 DCHECK(page_size >= 0 && (page_size == 0 || size % page_size == 0)); | |
97 | |
98 if (shared_meta_->cookie != kGlobalCookie) { | |
99 // This block is only executed when a completely new memory segment is | |
100 // being initialized. It's unshared and single-threaded... | |
101 const BlockHeader* first_block = reinterpret_cast<BlockHeader*>( | |
102 mem_base_ + sizeof(SharedMetadata)); | |
103 if (shared_meta_->cookie != 0 || | |
104 shared_meta_->size != 0 || | |
105 shared_meta_->version != 0 || | |
106 subtle::NoBarrier_Load(&shared_meta_->freeptr) != 0 || | |
107 shared_meta_->corrupted != 0 || | |
108 shared_meta_->full != 0 || | |
109 shared_meta_->tailptr != 0 || | |
110 shared_meta_->queue.cookie != 0 || | |
111 subtle::NoBarrier_Load(&shared_meta_->queue.next) != 0 || | |
112 first_block->size != 0 || | |
113 first_block->cookie != 0 || | |
114 first_block->type != 0 || | |
115 first_block->next != 0) { | |
116 // ...or something malicious has been playing with the metadata. | |
117 NOTREACHED(); | |
118 SetCorrupted(); | |
119 } | |
120 | |
121 // This is still safe to do even if corruption has been detected. | |
122 shared_meta_->cookie = kGlobalCookie; | |
123 shared_meta_->size = size; | |
124 shared_meta_->page_size = page_size; | |
125 shared_meta_->version = kGlobalVersion; | |
126 subtle::NoBarrier_Store(&shared_meta_->freeptr, sizeof(SharedMetadata)); | |
127 | |
128 // Set up the queue of iterable allocations. | |
129 shared_meta_->queue.size = sizeof(BlockHeader); | |
130 shared_meta_->queue.cookie = kBlockCookieQueue; | |
131 subtle::NoBarrier_Store(&shared_meta_->queue.next, OFFSET_QUEUE); | |
132 subtle::NoBarrier_Store(&shared_meta_->tailptr, OFFSET_QUEUE); | |
133 } else { | |
134 // The allocator is attaching to a previously initialized segment of | |
135 // memory. Make sure the embedded data matches what has been passed. | |
136 if (shared_meta_->size != size || | |
137 shared_meta_->page_size != page_size) { | |
138 NOTREACHED(); | |
139 SetCorrupted(); | |
140 } | |
141 } | |
142 } | |
143 | |
144 SharedMemoryAllocator::~SharedMemoryAllocator() { | |
145 } | |
146 | |
147 int32_t SharedMemoryAllocator::Allocate(int32_t size, int32_t type) { | |
148 if (size < 0) { | |
149 NOTREACHED(); | |
150 return OFFSET_NULL; | |
151 } | |
152 | |
153 // Round up the requested size, plus header, to the next allocation alignment. | |
154 size += sizeof(BlockHeader); | |
155 size = (size + (kAllocAlignment - 1)) & ~(kAllocAlignment - 1); | |
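  // Illustrative example (not in the original CL): with the 16-byte
  // BlockHeader defined above and kAllocAlignment == 8, a request for 20
  // bytes becomes 20 + 16 = 36, which then rounds up to (36 + 7) & ~7 == 40.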
156 if (size > mem_page_) | |
157 return OFFSET_NULL; | |
158 | |
159 // Allocation is lockless so we do all our calculation and then, if the | |
160 // final compare-and-swap indicates a change occurred since we started, | |
161 // scrap everything and start over. | |
162 for (;;) { | |
163 if (IsCorrupted()) | |
164 return OFFSET_NULL; | |
165 | |
166 int32_t freeptr = subtle::Acquire_Load(&shared_meta_->freeptr); | |
167 if (freeptr + size > mem_size_) { | |
168 shared_meta_->full = true; | |
169 return OFFSET_NULL; | |
170 } | |
171 | |
172 // Get a pointer to the "free" block. It doesn't yet have a header; pass | |
173 // -sizeof(header) so that accounting for the header yields an expected | |
174 // size of zero, which is what will be stored at that location. If something | |
175 // has been allocated since the load of freeptr above, it is still safe | |
176 // as nothing will be written to that location until after the CAS below. | |
177 BlockHeader* block = GetBlock(freeptr, 0, -(int)sizeof(BlockHeader), true); | |
178 if (!block) { | |
179 SetCorrupted(); | |
180 return OFFSET_NULL; | |
181 } | |
182 | |
183 // An allocation cannot cross page boundaries. If it would, create a | |
184 // "wasted" block and begin again at the top of the next page. | |
185 int32_t page_free = mem_page_ - freeptr % mem_page_; | |
186 if (size > page_free) { | |
187 int32_t new_freeptr = freeptr + page_free; | |
188 if (subtle::Release_CompareAndSwap( | |
189 &shared_meta_->freeptr, freeptr, new_freeptr) == freeptr) { | |
190 block->size = page_free; | |
191 block->cookie = kBlockCookieWasted; | |
192 } | |
193 continue; | |
194 } | |
195 | |
196 // Don't leave a slice at the end of a page too small for anything. This | |
197 // can result in an allocation up to two alignment-sizes greater than the | |
198 // minimum required by requested-size + header + alignment. | |
199 if (page_free - size < (int)(sizeof(BlockHeader) + kAllocAlignment)) | |
200 size = page_free; | |
201 | |
202 int32_t new_freeptr = freeptr + size; | |
203 if (new_freeptr > mem_size_) { | |
204 SetCorrupted(); | |
205 return OFFSET_NULL; | |
206 } | |
207 | |
208 if (subtle::Release_CompareAndSwap( | |
209 &shared_meta_->freeptr, freeptr, new_freeptr) != freeptr) { | |
210 // Another thread must have completed an allocation while we were working. | |
211 // Try again. | |
212 continue; | |
213 } | |
214 | |
215 // Given that all memory was zeroed before ever being given to an instance | |
216 // of this class and given that we only allocate in a monotonic fashion | |
217 // going forward, it must be that the newly allocated block is completely | |
218 // full of zeros. If we find anything in the block header that is NOT a | |
219 // zero then something must have previously run amok through memory, | |
220 // writing beyond the allocated space and into unallocated space. | |
221 if (block->size != 0 || | |
222 block->cookie != kBlockCookieFree || | |
223 block->type != 0 || | |
224 subtle::NoBarrier_Load(&block->next) != 0) { | |
225 SetCorrupted(); | |
226 return OFFSET_NULL; | |
227 } | |
228 | |
229 block->size = size; | |
230 block->cookie = kBlockCookieAllocated; | |
231 block->type = type; | |
232 return freeptr; | |
233 } | |
234 } | |
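As an aside, the loop above is a lock-free "bump" allocation that is retried whenever the compare-and-swap on freeptr loses a race. A minimal standalone sketch of that pattern, using std::atomic instead of base/atomicops and hypothetical names (not part of the CL):

  #include <atomic>
  #include <cstdint>

  // Sketch: claim |size| bytes from a |capacity|-byte arena by advancing an
  // atomic free pointer; returns the claimed offset, or 0 if the arena is full.
  int32_t BumpAllocate(std::atomic<int32_t>& freeptr, int32_t capacity,
                       int32_t size) {
    for (;;) {
      int32_t old_free = freeptr.load(std::memory_order_acquire);
      int32_t new_free = old_free + size;
      if (new_free > capacity)
        return 0;  // Plays the role of OFFSET_NULL.
      // If freeptr is still |old_free|, claim [old_free, new_free); otherwise
      // another thread allocated in the meantime, so recompute and retry.
      if (freeptr.compare_exchange_weak(old_free, new_free,
                                        std::memory_order_release,
                                        std::memory_order_relaxed)) {
        return old_free;
      }
    }
  }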
235 | |
236 void SharedMemoryAllocator::GetMemoryInfo(MemoryInfo* meminfo) { | |
237 int32_t remaining = | |
238 mem_size_ - subtle::NoBarrier_Load(&shared_meta_->freeptr); | |
239 meminfo->total = mem_size_; | |
240 meminfo->free = IsCorrupted() ? 0 : remaining - sizeof(BlockHeader); | |
241 } | |
242 | |
243 void SharedMemoryAllocator::MakeIterable(int32_t offset) { | |
244 if (IsCorrupted()) | |
245 return; | |
246 BlockHeader* block = GetBlock(offset, 0, 0, false); | |
247 if (!block) // invalid offset | |
248 return; | |
249 if (subtle::NoBarrier_Load(&block->next) != 0) // previously set iterable | |
250 return; | |
251 subtle::NoBarrier_Store(&block->next, OFFSET_QUEUE); // will be tail block | |
252 | |
253 // Try to add this block to the tail of the queue. May take multiple tries. | |
254 int32_t tail; | |
255 for (;;) { | |
256 tail = subtle::Acquire_Load(&shared_meta_->tailptr); | |
257 block = GetBlock(tail, 0, 0, true); | |
258 if (!block) { | |
259 SetCorrupted(); | |
260 return; | |
261 } | |
262 int32_t next = subtle::NoBarrier_Load(&block->next); | |
263 | |
264 // Ensure that the tail pointer didn't change while reading next. Only | |
Alexander Potapenko, 2015/10/30 21:01:15: If I'm understanding correctly, you don't actually …
265 // the read of the tail pointer is atomic but we need to read both the | |
266 // tail pointer and the next pointer from it in an atomic fashion. The | |
267 // way to do this is to read both non-atomically and then verify after | |
268 // the second read that the first read is still valid/unchanged. | |
269 if (tail == subtle::Release_Load(&shared_meta_->tailptr)) { | |
Alexander Potapenko, 2015/10/30 20:43:20: The problem with Release_Load() and Acquire_Store( …
270 // Check if the found block is truly the last in the queue (i.e. it | |
271 // points back to the "queue" node). | |
272 if (next == OFFSET_QUEUE) { | |
273 // Yes. Try to append the passed block after the current tail block. | |
274 if (subtle::Release_CompareAndSwap( | |
275 &block->next, OFFSET_QUEUE, offset) == OFFSET_QUEUE) { | |
276 // Success! The block is enqueued; need to update the tail pointer. | |
277 break; | |
278 } | |
279 } else { | |
280 // No. Another thread has stopped between the block-next update | |
281 // and the tail-pointer update. Try to update tailptr past the | |
282 // found block. That other thread may complete it first or it | |
283 // may have crashed. Be fail-safe. | |
284 subtle::Release_CompareAndSwap(&shared_meta_->tailptr, tail, next); | |
285 } | |
286 } | |
287 } | |
288 | |
289 // Block has been enqueued. Now update the tail-pointer past it. This | |
290 // could fail if another thread has already completed the operation as | |
291 // part of being fail-safe. | |
292 subtle::Release_CompareAndSwap(&shared_meta_->tailptr, tail, offset); | |
293 } | |
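For reference, the enqueue above follows the Michael & Scott append path: a block is hung off the current tail with a CAS, and a half-finished append by a stalled (or crashed) thread is helped along by swinging the tail forward. A simplified pointer-based sketch with std::atomic and hypothetical names (the CL itself works on int32_t offsets and base::subtle atomics):

  #include <atomic>

  struct Node {
    std::atomic<Node*> next{nullptr};
  };

  // Sketch: append |node| to an append-only list whose sentinel is |queue| and
  // whose last element is tracked by |tail|. A node whose next points back at
  // the sentinel is the current last node.
  void Enqueue(Node* queue, std::atomic<Node*>& tail, Node* node) {
    node->next.store(queue, std::memory_order_relaxed);
    for (;;) {
      Node* last = tail.load(std::memory_order_acquire);
      Node* next = last->next.load(std::memory_order_acquire);
      if (next == queue) {
        // |last| really is the last node; try to hang |node| off of it.
        if (last->next.compare_exchange_strong(next, node,
                                               std::memory_order_release)) {
          // Enqueued. Swing the tail forward; a concurrent thread may have
          // already done this for us, in which case the CAS simply fails.
          tail.compare_exchange_strong(last, node, std::memory_order_release);
          return;
        }
      } else {
        // Another thread appended but has not yet moved the tail; help it.
        tail.compare_exchange_strong(last, next, std::memory_order_release);
      }
    }
  }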
294 | |
295 void SharedMemoryAllocator::CreateIterator(Iterator* state) { | |
296 state->last = OFFSET_QUEUE; | |
297 state->loop_detector = OFFSET_QUEUE; | |
298 } | |
299 | |
300 int32_t SharedMemoryAllocator::GetNextIterable(Iterator* state, int32_t* type) { | |
301 const BlockHeader* block = GetBlock(state->last, 0, 0, true); | |
302 if (!block) // invalid iterator state | |
303 return OFFSET_NULL; | |
304 int32_t next = subtle::NoBarrier_Load(&block->next); | |
305 block = GetBlock(next, 0, 0, false); | |
306 if (!block) // no next allocation in queue | |
307 return OFFSET_NULL; | |
308 if (next == state->loop_detector) { | |
309 SetCorrupted(); | |
310 return OFFSET_NULL; | |
311 } | |
312 | |
313 state->last = next; | |
314 *type = block->type; | |
315 | |
316 // Memory corruption could cause a loop in the list. We need to detect | |
317 // that so as to not cause an infinite loop in the caller. This is done | |
318 // by having a second pointer that double-increments through the list. | |
319 // If it ever comes around to match "last" then we have a loop and need | |
320 // to stop iterating. It's possible that not every item gets visited and | |
321 // that the loop is traversed multiple times before being detected, but at | |
322 // least iteration stops. | |
323 if (state->loop_detector == OFFSET_QUEUE) | |
324 state->loop_detector = next; | |
325 block = GetBlock(state->loop_detector, 0, 0, false); | |
326 if (block) { | |
327 state->loop_detector = subtle::NoBarrier_Load(&block->next); | |
328 block = GetBlock(state->loop_detector, 0, 0, false); | |
329 if (block) | |
330 state->loop_detector = subtle::NoBarrier_Load(&block->next); | |
331 } | |
332 | |
333 return next; | |
334 } | |
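As an illustration only, the loop_detector logic above is the classic two-speed ("tortoise and hare") cycle check; on an ordinary singly linked list it reduces to something like:

  // Sketch: returns true if the list starting at |head| contains a cycle.
  // |slow| advances one node per step while |fast| advances two; they can
  // meet again only if the list loops back on itself.
  struct ListNode {
    const ListNode* next = nullptr;
  };

  bool HasCycle(const ListNode* head) {
    const ListNode* slow = head;
    const ListNode* fast = head;
    while (fast && fast->next) {
      slow = slow->next;
      fast = fast->next->next;
      if (slow == fast)
        return true;
    }
    return false;
  }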
335 | |
336 void SharedMemoryAllocator::SetCorrupted() { | |
337 LOG(ERROR) << "Corruption detected in shared-memory segment."; | |
338 corrupted_ = true; | |
339 shared_meta_->corrupted = true; | |
340 } | |
341 | |
342 bool SharedMemoryAllocator::IsCorrupted() { | |
343 if (corrupted_ || shared_meta_->corrupted) { | |
344 SetCorrupted(); // Make sure all indicators are set. | |
345 return true; | |
346 } | |
347 return false; | |
348 } | |
349 | |
350 bool SharedMemoryAllocator::IsFull() { | |
351 return shared_meta_->full != 0; | |
352 } | |
353 | |
354 // Dereference a block |offset| and ensure that it's valid for the desired | |
355 // |type| and |size|. |special| indicates that we may try to access block | |
356 // headers not available to callers but still accessed by this module. By | |
357 // having internal dereferences go through this same function, the allocator | |
358 // is hardened against corruption. | |
359 SharedMemoryAllocator::BlockHeader* SharedMemoryAllocator::GetBlock( | |
360 int32_t offset, int32_t type, int32_t size, bool special) { | |
361 // Validation of parameters. | |
362 if (offset % kAllocAlignment != 0) | |
363 return nullptr; | |
364 if (offset < (int)(special ? OFFSET_QUEUE : sizeof(SharedMetadata))) | |
365 return nullptr; | |
366 size += sizeof(BlockHeader); | |
367 if (offset + size > mem_size_) | |
368 return nullptr; | |
369 int32_t freeptr = subtle::NoBarrier_Load(&shared_meta_->freeptr); | |
370 if (offset + size > freeptr) | |
371 return nullptr; | |
372 | |
373 // Validation of referenced block-header. | |
374 const BlockHeader* block = reinterpret_cast<BlockHeader*>(mem_base_ + offset); | |
375 if (block->size < size) | |
376 return nullptr; | |
377 if (!special && block->cookie != kBlockCookieAllocated) | |
378 return nullptr; | |
379 if (type != 0 && block->type != type) | |
380 return nullptr; | |
381 | |
382 // Return pointer to block data. | |
383 return reinterpret_cast<BlockHeader*>(mem_base_ + offset); | |
384 } | |
385 | |
386 void* SharedMemoryAllocator::GetBlockData(int32_t offset, int32_t type, | |
387 int32_t size, bool special) { | |
388 DCHECK(size > 0); | |
389 BlockHeader* block = GetBlock(offset, type, size, special); | |
390 if (!block) | |
391 return nullptr; | |
392 return reinterpret_cast<char*>(block) + sizeof(BlockHeader); | |
393 } | |
394 | |
395 } // namespace base | |
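A minimal usage sketch, based only on the public methods visible in this file; the shared-memory setup, the Iterator declaration details, and the chosen sizes/types are assumptions, not something this CL prescribes:

  #include "base/memory/shared_memory_allocator.h"

  // |memory| is assumed to be a zero-initialized, suitably aligned mapping of
  // kSegmentSize bytes shared with other processes.
  void ExampleUsage(void* memory) {
    const int32_t kSegmentSize = 64 << 10;  // Within the 1 KiB..1 MiB DCHECK.
    base::SharedMemoryAllocator allocator(memory, kSegmentSize,
                                          /*page_size=*/0);

    // Allocate a 128-byte block tagged with an arbitrary caller-defined type.
    int32_t offset = allocator.Allocate(128, /*type=*/42);
    if (!offset)
      return;  // Segment full or corrupted.

    // Publish the block so that iterators (in any process) can find it.
    allocator.MakeIterable(offset);

    // Walk every block that has been made iterable so far.
    base::SharedMemoryAllocator::Iterator iter;
    allocator.CreateIterator(&iter);
    int32_t type;
    while (int32_t found = allocator.GetNextIterable(&iter, &type)) {
      // |found| is the offset of an iterable block whose type is |type|.
    }
  }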