Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright (c) 2015 The Chromium Authors. All rights reserved. | |
| 2 // Use of this source code is governed by a BSD-style license that can be | |
| 3 // found in the LICENSE file. | |
| 4 | |
| 5 #include "base/memory/shared_memory_allocator.h" | |
| 6 | |
| 7 #include <assert.h> | |
| 8 | |
| 9 #include "base/logging.h" | |
| 10 | |
| 11 // All integer constants in this file are signed because Atomic32 is signed | |
| 12 // and keeping all others consistent with this avoids a lot of the casting | |
| 13 // that would otherwise be needed just to avoid signed/unsigned compiler errors. | |
| 14 // This means an occasional cast of a constant from sizeof() to "int", but that | |
| 15 // is far simpler than the alternative. | |
| 16 | |
| 17 namespace { | |
| 18 | |
| 19 // All allocations and data-structures must be aligned to this byte boundary. | |
| 20 // It shouldn't be less than 8 so that 64-bit values can be read in a single | |
Dmitry Vyukov
2015/11/03 14:06:46
What architecture do you have in mind? And what ab
bcwhite
2015/11/03 16:28:20
Comment expanded and updated. I don't have a part
| 21 // RAM bus access. 16 can be used so that the block header would always fall | |
| 22 // within a single cache line. | |
| 23 const int32_t kAllocAlignment = 8; | |
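The review thread above questions the rationale for this value. Purely as an illustration of the constraint being described, not code from this patch, the 64-bit-alignment requirement could be captured at compile time like this:

```cpp
#include <cstdint>

const int32_t kAllocAlignment = 8;

// 64-bit values can be read in a single access only when naturally aligned,
// so the allocation alignment must be a multiple of that natural alignment.
static_assert(kAllocAlignment % alignof(int64_t) == 0,
              "allocations must keep 64-bit fields naturally aligned");
// Using 16 instead would additionally keep a 16-byte block header from ever
// straddling a 64-byte cache line.
```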
| 24 | |
| 25 // A constant (random) value placed in the shared metadata to identify | |
| 26 // an already initialized memory segment. | |
| 27 const int32_t kGlobalCookie = 0x408305DC; | |
| 28 | |
| 29 // The current version of the metadata. If updates are made that change | |
| 30 // the metadata, the version number can be queried to operate in a backward- | |
| 31 // compatible manner until the memory segment is completely re-initialized. | |
| 32 const int32_t kGlobalVersion = 1; | |
| 33 | |
| 34 // Constant values placed in the block headers to indicate its state. | |
| 35 const int32_t kBlockCookieFree = 0; | |
| 36 const int32_t kBlockCookieQueue = 1; | |
| 37 const int32_t kBlockCookieWasted = -1; | |
| 38 const int32_t kBlockCookieAllocated = 0xC8799269; | |
| 39 | |
| 40 // TODO(bcwhite): When acceptable, consider moving flags to std::atomic<char> | |
| 41 // types rather than combined bitfield. | |
| 42 | |
| 43 enum { | |
| 44 kFlagCorrupted, | |
| 45 kFlagFull | |
| 46 }; | |
| 47 | |
| 48 bool CheckFlag(base::subtle::Atomic32* flags, int flag) { | |
| 49 base::subtle::Atomic32 loaded_flags = base::subtle::NoBarrier_Load(flags); | |
Alexander Potapenko
2015/11/03 08:12:54
It's not immediately evident whether the currently
bcwhite
2015/11/03 16:28:19
Done.
| 50 return (loaded_flags & 1 << flag) != 0; | |
| 51 } | |
| 52 | |
| 53 void SetFlag(base::subtle::Atomic32* flags, int flag, bool set) { | |
Dmitry Vyukov
2015/11/03 14:06:45
You never pass set=false. Please delete it. This c
bcwhite
2015/11/03 16:28:20
Done.
| 54 for (;;) { | |
| 55 base::subtle::Atomic32 loaded_flags = base::subtle::NoBarrier_Load(flags); | |
| 56 base::subtle::Atomic32 new_flags = | |
| 57 (loaded_flags & ~(1 << flag)) | (set ? 1 : 0) << flag; | |
| 58 if (base::subtle::Release_CompareAndSwap( | |
Dmitry Vyukov
2015/11/03 14:06:45
You use NoBarrier_Load to load flags, so Release_C
bcwhite
2015/11/03 16:28:20
Done.
| 59 flags, loaded_flags, new_flags) == loaded_flags) { | |
| 60 break; | |
| 61 } | |
| 62 } | |
| 63 } | |
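The two review threads above touch the readability of the bit test in CheckFlag and the memory ordering of SetFlag's compare-and-swap loop; the revised patch set is not shown here. As a rough standalone sketch of the same flag-bitfield idea, using std::atomic with relaxed ordering instead of the base::subtle calls in the patch (an illustration only, not the Chromium code):

```cpp
#include <atomic>
#include <cstdint>

// Flag bit positions, mirroring kFlagCorrupted and kFlagFull above.
enum : int { kFlagCorrupted = 0, kFlagFull = 1 };

bool CheckFlag(const std::atomic<int32_t>& flags, int flag) {
  // Load once, then test the single bit of interest.
  const int32_t loaded_flags = flags.load(std::memory_order_relaxed);
  return (loaded_flags & (1 << flag)) != 0;
}

void SetFlag(std::atomic<int32_t>& flags, int flag) {
  // fetch_or sets the bit in one atomic read-modify-write step, which is the
  // std::atomic equivalent of the CAS retry loop used in the patch.
  flags.fetch_or(1 << flag, std::memory_order_relaxed);
}
```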
| 64 | |
| 65 } // namespace | |
| 66 | |
| 67 namespace base { | |
| 68 | |
| 69 // The block-header is placed at the top of every allocation within the | |
| 70 // segment to describe the data that follows it. | |
| 71 struct SharedMemoryAllocator::BlockHeader { | |
| 72 int32_t size; // Number of bytes in this block, including header. | |
| 73 int32_t cookie; // Constant value indicating completed allocation. | |
| 74 int32_t type; // A number provided by caller indicating data type. | |
| 75 subtle::Atomic32 next; // Pointer to the next block when iterating | |
Dmitry Vyukov
2015/11/03 14:06:45
add . at the end of comment for consistency
bcwhite
2015/11/03 16:28:19
Done.
| 76 }; | |
| 77 | |
| 78 // The shared metadata exists once at the top of the memory segment to | |
| 79 // describe the state of the allocator to all processes. | |
| 80 struct SharedMemoryAllocator::SharedMetadata { | |
| 81 int32_t cookie; // Some value that indicates complete initialization. | |
| 82 int32_t size; // Total size of memory segment. | |
| 83 int32_t page_size; // Paging size within memory segment. | |
| 84 int32_t version; // Version code so upgrades don't break. | |
| 85 subtle::Atomic32 freeptr; // Offset to first free space in the segment. | |
| 86 subtle::Atomic32 flags; // Bitfield of information flags. | |
| 87 int32_t reserved; // Padding to ensure size is multiple of alignment. | |
| 88 | |
| 89 // The "iterable" queue is an M&S Queue as described here, append-only: | |
| 90 // https://www.research.ibm.com/people/m/michael/podc-1996.pdf | |
| 91 subtle::Atomic32 tailptr; // Last block available for iteration. | |
| 92 BlockHeader queue; // Empty block for linked-list head/tail. (must be last) | |
| 93 }; | |
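To picture the layout these two structs imply: SharedMetadata sits at offset 0 of the segment, its embedded `queue` block is the sentinel head/tail of the iteration list (which is why offset 0 can double as the null offset), and allocations start right after the metadata. A simplified mirror of the declarations above, for illustration only; the real fields use subtle::Atomic32 where noted:

```cpp
#include <cstddef>
#include <cstdint>

// Simplified, non-atomic mirrors of the structs in this file.
struct BlockHeader { int32_t size, cookie, type, next; };  // 16 bytes
struct SharedMetadata {
  int32_t cookie, size, page_size, version;
  int32_t freeptr, flags, reserved;  // freeptr/flags are Atomic32 in the patch
  int32_t tailptr;                   // Atomic32 in the patch
  BlockHeader queue;                 // must remain the last member
};

// The sentinel "queue" block is addressed by its offset inside the metadata,
// and the first real allocation begins where the metadata ends.
constexpr size_t kOffsetQueue = offsetof(SharedMetadata, queue);
constexpr size_t kFirstAllocOffset = sizeof(SharedMetadata);  // initial freeptr

static_assert(sizeof(BlockHeader) % 8 == 0, "blocks stay 8-byte aligned");
static_assert(sizeof(SharedMetadata) % 8 == 0, "first block stays aligned");
```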
| 94 | |
| 95 // The "queue" block header is used to detect "last node" so that zero/null | |
| 96 // can be used to indicate that it hasn't been added at all. It is part of | |
| 97 // the SharedMetadata structure which itself is always located at offset zero. | |
| 98 // This can't be a constant because SharedMetadata is a private definition. | |
| 99 #define OFFSET_QUEUE offsetof(SharedMetadata, queue) | |
| 100 #define OFFSET_NULL 0 // the equivalent of NULL for an offset | |
| 101 | |
| 102 SharedMemoryAllocator::SharedMemoryAllocator(void* base, int32_t size, | |
| 103 int32_t page_size) | |
| 104 : shared_meta_(static_cast<SharedMetadata*>(base)), | |
| 105 mem_base_(static_cast<char*>(base)), | |
| 106 mem_size_(size), | |
| 107 mem_page_(page_size ? page_size : size), | |
| 108 last_seen_(0), | |
| 109 corrupted_(0) { | |
| 110 static_assert(sizeof(BlockHeader) % kAllocAlignment == 0, | |
| 111 "BlockHeader is not a multiple of kAllocAlignment"); | |
| 112 static_assert(sizeof(SharedMetadata) % kAllocAlignment == 0, | |
| 113 "SharedMetadata is not a multiple of kAllocAlignment"); | |
| 114 | |
| 115 DCHECK(base && reinterpret_cast<uintptr_t>(base) % kAllocAlignment == 0); | |
Alexander Potapenko
2015/11/03 08:12:54
These invariants shouldn't be checked often, shoul
bcwhite
2015/11/03 16:28:19
Done.
| 116 DCHECK(size >= 1 << 10 && size <= 1 << 20 && // 1 KiB <= size <= 1 MiB | |
| 117 size % kAllocAlignment == 0); | |
| 118 DCHECK(page_size >= 0 && (page_size == 0 || size % page_size == 0)); | |
| 119 | |
| 120 if (shared_meta_->cookie != kGlobalCookie) { | |
| 121 // This block is only executed when a completely new memory segment is | |
| 122 // being initialized. It's unshared and single-threaded... | |
| 123 const BlockHeader* first_block = reinterpret_cast<BlockHeader*>( | |
| 124 mem_base_ + sizeof(SharedMetadata)); | |
| 125 if (shared_meta_->cookie != 0 || | |
| 126 shared_meta_->size != 0 || | |
| 127 shared_meta_->version != 0 || | |
| 128 subtle::NoBarrier_Load(&shared_meta_->freeptr) != 0 || | |
| 129 subtle::NoBarrier_Load(&shared_meta_->flags) != 0 || | |
| 130 shared_meta_->tailptr != 0 || | |
| 131 shared_meta_->queue.cookie != 0 || | |
| 132 subtle::NoBarrier_Load(&shared_meta_->queue.next) != 0 || | |
| 133 first_block->size != 0 || | |
| 134 first_block->cookie != 0 || | |
| 135 first_block->type != 0 || | |
| 136 first_block->next != 0) { | |
| 137 // ...or something malicious has been playing with the metadata. | |
| 138 NOTREACHED(); | |
| 139 SetCorrupted(); | |
| 140 } | |
| 141 | |
| 142 // This is still safe to do even if corruption has been detected. | |
| 143 shared_meta_->cookie = kGlobalCookie; | |
| 144 shared_meta_->size = size; | |
| 145 shared_meta_->page_size = page_size; | |
| 146 shared_meta_->version = kGlobalVersion; | |
| 147 subtle::NoBarrier_Store(&shared_meta_->freeptr, sizeof(SharedMetadata)); | |
| 148 | |
| 149 // Set up the queue of iterable allocations. | |
| 150 shared_meta_->queue.size = sizeof(BlockHeader); | |
| 151 shared_meta_->queue.cookie = kBlockCookieQueue; | |
| 152 subtle::NoBarrier_Store(&shared_meta_->queue.next, OFFSET_QUEUE); | |
| 153 subtle::NoBarrier_Store(&shared_meta_->tailptr, OFFSET_QUEUE); | |
| 154 } else { | |
| 155 // The allocator is attaching to a previously initialized segment of | |
| 156 // memory. Make sure the embedded data matches what has been passed. | |
| 157 if (shared_meta_->size != size || | |
| 158 shared_meta_->page_size != page_size) { | |
| 159 NOTREACHED(); | |
| 160 SetCorrupted(); | |
| 161 } | |
| 162 } | |
| 163 } | |
| 164 | |
| 165 SharedMemoryAllocator::~SharedMemoryAllocator() { | |
| 166 } | |
| 167 | |
| 168 int32_t SharedMemoryAllocator::Allocate(int32_t size, int32_t type) { | |
| 169 if (size < 0) { | |
| 170 NOTREACHED(); | |
| 171 return OFFSET_NULL; | |
| 172 } | |
| 173 | |
| 174 // Round up the requested size, plus header, to the next allocation alignment. | |
| 175 size += sizeof(BlockHeader); | |
| 176 size = (size + (kAllocAlignment - 1)) & ~(kAllocAlignment - 1); | |
| 177 if (size > mem_page_) | |
| 178 return OFFSET_NULL; | |
| 179 | |
| 180 // Allocation is lockless so we do all our calculation and then, if saving | |
| 181 // indicates a change has occurred since we started, scrap everything and | |
| 182 // start over. | |
| 183 for (;;) { | |
| 184 if (IsCorrupted()) | |
| 185 return OFFSET_NULL; | |
| 186 | |
| 187 int32_t freeptr = subtle::Acquire_Load(&shared_meta_->freeptr); | |
| 188 if (freeptr + size > mem_size_) { | |
| 189 SetFlag(&shared_meta_->flags, kFlagFull, true); | |
| 190 return OFFSET_NULL; | |
| 191 } | |
| 192 | |
| 193 // Get pointer to the "free" block. It doesn't even have a header; pass | |
| 194 // -sizeof(header) so accounting for that will yield an expected size of | |
| 195 // zero which is what will be stored at that location. If something | |
| 196 // has been allocated since the load of freeptr above, it is still safe | |
| 197 // as nothing will be written to that location until after the CAS below. | |
| 198 BlockHeader* block = GetBlock(freeptr, 0, -(int)sizeof(BlockHeader), true); | |
| 199 if (!block) { | |
| 200 SetCorrupted(); | |
| 201 return OFFSET_NULL; | |
| 202 } | |
| 203 | |
| 204 // An allocation cannot cross page boundaries. If it would, create a | |
| 205 // "wasted" block and begin again at the top of the next page. | |
| 206 int32_t page_free = mem_page_ - freeptr % mem_page_; | |
| 207 if (size > page_free) { | |
| 208 int32_t new_freeptr = freeptr + page_free; | |
| 209 if (subtle::Release_CompareAndSwap( | |
| 210 &shared_meta_->freeptr, freeptr, new_freeptr) == freeptr) { | |
| 211 block->size = page_free; | |
| 212 block->cookie = kBlockCookieWasted; | |
| 213 } | |
| 214 continue; | |
| 215 } | |
| 216 | |
| 217 // Don't leave a slice at the end of a page too small for anything. This | |
| 218 // can result in an allocation up to two alignment-sizes greater than the | |
| 219 // minimum required by requested-size + header + alignment. | |
| 220 if (page_free - size < (int)(sizeof(BlockHeader) + kAllocAlignment)) | |
| 221 size = page_free; | |
| 222 | |
| 223 int32_t new_freeptr = freeptr + size; | |
| 224 if (new_freeptr > mem_size_) { | |
| 225 SetCorrupted(); | |
| 226 return OFFSET_NULL; | |
| 227 } | |
| 228 | |
| 229 if (subtle::Release_CompareAndSwap( | |
| 230 &shared_meta_->freeptr, freeptr, new_freeptr) != freeptr) { | |
| 231 // Another thread must have completed an allocation while we were working. | |
| 232 // Try again. | |
| 233 continue; | |
| 234 } | |
| 235 | |
| 236 // Given that all memory was zeroed before ever being given to an instance | |
| 237 // of this class and given that we only allocate in a monotonic fashion | |
| 238 // going forward, it must be that the newly allocated block is completely | |
| 239 // full of zeros. If we find anything in the block header that is NOT a | |
| 240 // zero then something must have previously run amuck through memory, | |
| 241 // writing beyond the allocated space and into unallocated space. | |
| 242 if (block->size != 0 || | |
| 243 block->cookie != kBlockCookieFree || | |
| 244 block->type != 0 || | |
| 245 subtle::NoBarrier_Load(&block->next) != 0) { | |
| 246 SetCorrupted(); | |
| 247 return OFFSET_NULL; | |
| 248 } | |
| 249 | |
| 250 block->size = size; | |
| 251 block->cookie = kBlockCookieAllocated; | |
| 252 block->type = type; | |
| 253 return freeptr; | |
| 254 } | |
| 255 } | |
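To make the size arithmetic at the top of Allocate concrete, here is a small worked example; the 16-byte header size follows from the four 32-bit BlockHeader fields declared earlier, and kAllocAlignment is 8 as defined in this file:

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  const int32_t kAllocAlignment = 8;
  const int32_t kBlockHeaderSize = 16;  // four int32_t fields

  // A caller asks for 20 bytes of payload.
  int32_t size = 20;
  size += kBlockHeaderSize;                                        // 36
  size = (size + (kAllocAlignment - 1)) & ~(kAllocAlignment - 1);  // -> 40

  // With a 4096-byte page and freeptr at 4080, only 16 bytes remain on the
  // page, so this request would leave a 16-byte "wasted" block behind and be
  // retried at the top of the next page.
  int32_t mem_page = 4096, freeptr = 4080;
  int32_t page_free = mem_page - freeptr % mem_page;               // 16
  std::printf("rounded=%d page_free=%d fits=%d\n", size, page_free,
              size <= page_free);
  return 0;
}
```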
| 256 | |
| 257 void SharedMemoryAllocator::GetMemoryInfo(MemoryInfo* meminfo) { | |
| 258 int32_t remaining = | |
| 259 mem_size_ - subtle::NoBarrier_Load(&shared_meta_->freeptr); | |
| 260 meminfo->total = mem_size_; | |
| 261 meminfo->free = IsCorrupted() ? 0 : remaining - sizeof(BlockHeader); | |
| 262 } | |
| 263 | |
| 264 void SharedMemoryAllocator::MakeIterable(int32_t offset) { | |
| 265 if (IsCorrupted()) | |
| 266 return; | |
| 267 BlockHeader* block = GetBlock(offset, 0, 0, false); | |
| 268 if (!block) // invalid offset | |
| 269 return; | |
| 270 if (subtle::NoBarrier_Load(&block->next) != 0) // previously set iterable | |
| 271 return; | |
| 272 subtle::NoBarrier_Store(&block->next, OFFSET_QUEUE); // will be tail block | |
Dmitry Vyukov
2015/11/03 14:06:46
Why don't you use a lock-free stack?
The stack alg
bcwhite
2015/11/03 16:28:19
Honestly, because an M&S Queue was what Alexander
Alexander Potapenko
2015/11/03 17:25:15
Was that another Alexander? I never mentioned M&S
Dmitry Vyukov
2015/11/03 18:15:07
Makes sense.
I guess we could do something along
| 273 | |
| 274 // Try to add this block to the tail of the queue. May take multiple tries. | |
| 275 int32_t tail; | |
| 276 for (;;) { | |
| 277 tail = subtle::Acquire_Load(&shared_meta_->tailptr); | |
| 278 block = GetBlock(tail, 0, 0, true); | |
| 279 if (!block) { | |
| 280 SetCorrupted(); | |
| 281 return; | |
| 282 } | |
| 283 int32_t next = subtle::NoBarrier_Load(&block->next); | |
| 284 | |
| 285 // Ensure that the tail pointer didn't change while reading next. Only | |
| 286 // the read of the tail pointer is atomic but we need to read both the | |
| 287 // tail pointer and the next pointer from it in an atomic fashion. The | |
| 288 // way to do this is to read both non-atomically and then verify after | |
| 289 // the second read that the first read is still valid/unchanged. | |
| 290 if (tail == subtle::Release_Load(&shared_meta_->tailptr)) { | |
| 291 // Check if the found block is truly the last in the queue (i.e. it | |
| 292 // points back to the "queue" node). | |
| 293 if (next == OFFSET_QUEUE) { | |
| 294 // Yes. Try to append the passed block after the current tail block. | |
| 295 if (subtle::Release_CompareAndSwap( | |
| 296 &block->next, OFFSET_QUEUE, offset) == OFFSET_QUEUE) { | |
| 297 // Success! The block is enqueued; need to update the tail pointer. | |
| 298 break; | |
| 299 } | |
| 300 } else { | |
| 301 // No. Another thread has stopped between the block-next update | |
| 302 // and the tail-pointer update. Try to update tailptr past the | |
| 303 // found block. That other thread may complete it first or it | |
| 304 // may have crashed. Be fail-safe. | |
| 305 subtle::Release_CompareAndSwap(&shared_meta_->tailptr, tail, next); | |
| 306 } | |
| 307 } | |
| 308 } | |
| 309 | |
| 310 // Block has been enqueued. Now update the tail-pointer past it. This | |
| 311 // could fail if another thread has already completed the operation as | |
| 312 // part of being fail-safe. | |
| 313 subtle::Release_CompareAndSwap(&shared_meta_->tailptr, tail, offset); | |
| 314 } | |
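For readers unfamiliar with the Michael & Scott queue cited in SharedMetadata, MakeIterable above is the append half of that algorithm: link the new block after the current tail, then swing the tail pointer, with either step finishable by whichever thread gets there first. Below is a compact pointer-based sketch of the same enqueue using std::atomic; it is an illustration only, since the patch works with 32-bit offsets inside the shared segment and uses a self-referencing sentinel rather than null as its end marker:

```cpp
#include <atomic>

struct Node {
  std::atomic<Node*> next{nullptr};
};

struct Queue {
  Node head;                           // sentinel, like the "queue" block
  std::atomic<Node*> tail{&head};

  void Append(Node* node) {
    for (;;) {
      Node* last = tail.load(std::memory_order_acquire);
      Node* next = last->next.load(std::memory_order_acquire);
      if (last != tail.load(std::memory_order_acquire))
        continue;                      // tail moved while we were reading it
      if (next == nullptr) {
        // The observed tail really is last: try to link the new node to it.
        if (last->next.compare_exchange_weak(next, node,
                                             std::memory_order_release)) {
          // Linked. Swing the tail; another thread may have done it already.
          tail.compare_exchange_strong(last, node, std::memory_order_release);
          return;
        }
      } else {
        // A node was linked but the tail was not advanced yet; help it along.
        tail.compare_exchange_strong(last, next, std::memory_order_release);
      }
    }
  }
};
```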
| 315 | |
| 316 void SharedMemoryAllocator::CreateIterator(Iterator* state) { | |
| 317 state->last = OFFSET_QUEUE; | |
| 318 state->loop_detector = OFFSET_QUEUE; | |
| 319 } | |
| 320 | |
| 321 int32_t SharedMemoryAllocator::GetNextIterable(Iterator* state, int32_t* type) { | |
| 322 const BlockHeader* block = GetBlock(state->last, 0, 0, true); | |
| 323 if (!block) // invalid iterator state | |
| 324 return OFFSET_NULL; | |
| 325 int32_t next = subtle::NoBarrier_Load(&block->next); | |
| 326 block = GetBlock(next, 0, 0, false); | |
| 327 if (!block) // no next allocation in queue | |
| 328 return OFFSET_NULL; | |
| 329 if (next == state->loop_detector) { | |
| 330 SetCorrupted(); | |
| 331 return OFFSET_NULL; | |
| 332 } | |
| 333 | |
| 334 state->last = next; | |
| 335 *type = block->type; | |
| 336 | |
| 337 // Memory corruption could cause a loop in the list. We need to detect | |
| 338 // that so as to not cause an infinite loop in the caller. This is done | |
| 339 // by having a second pointer that double-increments through the list. | |
| 340 // If it ever comes around to match "last" then we have a loop and need | |
| 341 // to stop iterating. It's possible to not iterate through all items and | |
| 342 // it's possible to loop multiple times before the loop is detected but at | |
| 343 // least it stops. | |
| 344 if (state->loop_detector == OFFSET_QUEUE) | |
Dmitry Vyukov
2015/11/03 14:06:46
There is a simpler way to do it:
count number of
bcwhite
2015/11/03 16:28:19
Great! Though mine worked, it had to change anywa
| 345 state->loop_detector = next; | |
| 346 block = GetBlock(state->loop_detector, 0, 0, false); | |
| 347 if (block) { | |
| 348 state->loop_detector = subtle::NoBarrier_Load(&block->next); | |
| 349 block = GetBlock(state->loop_detector, 0, 0, false); | |
| 350 if (block) | |
| 351 state->loop_detector = subtle::NoBarrier_Load(&block->next); | |
| 352 } | |
| 353 | |
| 354 return next; | |
| 355 } | |
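The loop-detector above protects the caller against a cycle introduced by memory corruption by advancing a second cursor two links per step until it collides with the iteration position (the thread above notes a simpler count-based bound was adopted in a later patch set, but that suggestion is truncated here). The same two-speed idea on an ordinary singly linked list looks like this; a standalone sketch, not code from the patch:

```cpp
#include <cstddef>

struct Node {
  Node* next = nullptr;
};

// Returns true if following `next` pointers from `head` ever loops back on
// itself. The fast cursor moves two links per step and the slow cursor one,
// so any cycle forces them to meet; an intact list simply reaches nullptr.
bool HasCycle(const Node* head) {
  const Node* slow = head;
  const Node* fast = head;
  while (fast != nullptr && fast->next != nullptr) {
    slow = slow->next;
    fast = fast->next->next;
    if (slow == fast)
      return true;
  }
  return false;
}
```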
| 356 | |
| 357 void SharedMemoryAllocator::SetCorrupted() { | |
| 358 LOG(ERROR) << "Corruption detected in shared-memory segment."; | |
| 359 subtle::NoBarrier_Store(&corrupted_, 1); | |
Alexander Potapenko
2015/11/03 08:12:54
Why do you need both corrupted_ and kFlagCorrupted
bcwhite
2015/11/03 16:28:19
The shared flag can't be trusted since a malicious
| 360 SetFlag(&shared_meta_->flags, kFlagCorrupted, true); | |
| 361 } | |
| 362 | |
| 363 bool SharedMemoryAllocator::IsCorrupted() { | |
| 364 if (subtle::NoBarrier_Load(&corrupted_) || | |
| 365 CheckFlag(&shared_meta_->flags, kFlagCorrupted)) { | |
| 366 SetCorrupted(); // Make sure all indicators are set. | |
| 367 return true; | |
| 368 } | |
| 369 return false; | |
| 370 } | |
| 371 | |
| 372 bool SharedMemoryAllocator::IsFull() { | |
| 373 return CheckFlag(&shared_meta_->flags, kFlagFull); | |
| 374 } | |
| 375 | |
| 376 // Dereference a block |offset| and ensure that it's valid for the desired | |
| 377 // |type| and |size|. |special| indicates that we may try to access block | |
| 378 // headers not available to callers but still accessed by this module. By | |
| 379 // having internal dereferences go through this same function, the allocator | |
| 380 // is hardened against corruption. | |
| 381 SharedMemoryAllocator::BlockHeader* SharedMemoryAllocator::GetBlock( | |
| 382 int32_t offset, int32_t type, int32_t size, bool special) { | |
| 383 // Validation of parameters. | |
| 384 if (offset % kAllocAlignment != 0) | |
| 385 return nullptr; | |
| 386 if (offset < (int)(special ? OFFSET_QUEUE : sizeof(SharedMetadata))) | |
| 387 return nullptr; | |
| 388 size += sizeof(BlockHeader); | |
| 389 if (offset + size > mem_size_) | |
| 390 return nullptr; | |
| 391 int32_t freeptr = subtle::NoBarrier_Load(&shared_meta_->freeptr); | |
| 392 if (offset + size > freeptr) | |
| 393 return nullptr; | |
| 394 | |
| 395 // Validation of referenced block-header. | |
| 396 const BlockHeader* block = reinterpret_cast<BlockHeader*>(mem_base_ + offset); | |
| 397 if (block->size < size) | |
| 398 return nullptr; | |
| 399 if (!special && block->cookie != kBlockCookieAllocated) | |
| 400 return nullptr; | |
| 401 if (type != 0 && block->type != type) | |
| 402 return nullptr; | |
| 403 | |
| 404 // Return pointer to block data. | |
| 405 return reinterpret_cast<BlockHeader*>(mem_base_ + offset); | |
| 406 } | |
| 407 | |
| 408 void* SharedMemoryAllocator::GetBlockData(int32_t offset, int32_t type, | |
| 409 int32_t size, bool special) { | |
| 410 DCHECK(size > 0); | |
| 411 BlockHeader* block = GetBlock(offset, type, size, special); | |
| 412 if (!block) | |
| 413 return nullptr; | |
| 414 return reinterpret_cast<char*>(block) + sizeof(BlockHeader); | |
| 415 } | |
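Putting the pieces of this file together, a caller would allocate, fill, publish, and iterate roughly as follows. This is a hypothetical usage sketch based only on the signatures visible in this review; the shared segment is faked with zeroed heap memory, and whether GetBlockData and Iterator are directly usable by clients (rather than wrapped by typed accessors in the header) is an assumption:

```cpp
#include <cstdint>
#include <cstring>
#include <vector>

#include "base/memory/shared_memory_allocator.h"

void Example() {
  // In real use this would be a shared-memory mapping visible to several
  // processes; a zero-filled heap buffer stands in for it here.
  std::vector<char> segment(64 << 10, 0);
  base::SharedMemoryAllocator allocator(
      segment.data(), static_cast<int32_t>(segment.size()),
      /*page_size=*/4096);

  // Reserve space for some caller-defined record (type id 42 is arbitrary).
  int32_t offset = allocator.Allocate(64, /*type=*/42);
  if (!offset)
    return;  // segment full or corrupted

  if (void* data = allocator.GetBlockData(offset, /*type=*/42, 64, false))
    std::memset(data, 0x5A, 64);

  // Publish the block so that iterators (in any attached process) can see it.
  allocator.MakeIterable(offset);

  base::SharedMemoryAllocator::Iterator iter;
  allocator.CreateIterator(&iter);
  int32_t type = 0;
  while (int32_t next = allocator.GetNextIterable(&iter, &type)) {
    // |next| is the offset of an iterable block whose type id is |type|.
  }
}
```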
| 416 | |
| 417 } // namespace base | |