Chromium Code Reviews

// Copyright (c) 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/memory/shared_memory_allocator.h"

#include <assert.h>

#include "base/atomicops.h"
#include "base/logging.h"

namespace {

// All allocations and data-structures must be aligned to this byte boundary.
// It shouldn't be less than 8 so that 64-bit values can be read in a single
// RAM bus access. 16 was chosen so that the block header would always fall

Alexander Potapenko (2015/10/30 06:53:13):
Nit: please 's/.  /. /' here and below.

// within a single cache line.
const int32_t kAllocAlignment = 16;

// A constant (random) value placed in the shared metadata to identify
// an already initialized memory segment.
const int32_t kGlobalCookie = 0x408305DC;

// The current version of the metadata. If updates are made that change
// the metadata, the version number can be queried to operate in a backward-
// compatible manner until the memory segment is completely re-initialized.
const int32_t kGlobalVersion = 1;

// Constant values placed in the block headers to indicate their state.
const int32_t kBlockCookieFree = 0;
const int32_t kBlockCookieQueue = 1;
const int32_t kBlockCookieWasted = -1;
const int32_t kBlockCookieAllocated = 0xC8799269;

}  // namespace

namespace base {

// The block-header is placed at the top of every allocation within the
// segment to describe the data that follows it.
struct SharedMemoryAllocator::BlockHeader {
  int32_t size;  // number of bytes in this block, including header

Alexander Potapenko (2015/10/30 06:53:13):
Please fix the comments according to https://googl…

bcwhite (2015/10/30 14:01:10):
Done.

  int32_t cookie;  // constant value indicating completed allocation

Alexander Potapenko (2015/10/30 06:53:13):
I think it's better to align the comments to this…

bcwhite (2015/10/30 14:01:10):
*I* prefer it but I've gotten nags in the past tha…

chrisha (2015/10/30 15:11:54):
How about letting "git cl format" solve this probl…

  int32_t type;  // a number provided by caller indicating data type
  subtle::Atomic32 next;  // pointer to the next block when iterating
};

// The shared metadata exists once at the top of the memory segment to
// describe the state of the allocator to all processes.
struct SharedMemoryAllocator::SharedMetadata {
  int32_t cookie;  // some value that indicates complete initialization
  int32_t size;  // total size of memory segment
  int32_t version;  // version code so upgrades don't break
  subtle::Atomic32 freeptr;  // offset to first free space in the segment
  int32_t reserved[2];  // padding to ensure size is multiple of alignment

Alexander Potapenko (2015/10/30 06:53:13):
I think it's more common to put the padding at the…

bcwhite (2015/10/30 14:01:10):
The "BlockHeader" entry must be the very last entr…

  char corrupted;  // flag indicating that corruption has been detected
  char full;  // flag indicating that alloc failed because segment is full
  char flags[2];  // align to next int (not strictly needed but avoids confusion)

  // The "iterable" queue is an append-only M&S Queue, as described here:
  // https://www.research.ibm.com/people/m/michael/podc-1996.pdf
  subtle::Atomic32 tailptr;  // last block available for iteration
  BlockHeader queue;  // empty block for linked-list head/tail (must be last)
};

// The "queue" block header is used to detect the "last node" so that zero/null
// can be used to indicate that it hasn't been added at all. It is part of
// the SharedMetadata structure, which itself is always located at offset zero.
// This can't be a constant because SharedMetadata is a private definition.
#define OFFSET_QUEUE offsetof(SharedMetadata, queue)
#define OFFSET_NULL 0  // the equivalent NULL value for an offset

Alexander Potapenko (2015/10/30 06:53:13):
I think '0' is good enough to not introduce a cons…

bcwhite (2015/10/30 14:01:10):
I could just use NULL since it is defined as ``0''…


SharedMemoryAllocator::SharedMemoryAllocator(void* base, int32_t size,
                                             int32_t page)

Alexander Potapenko (2015/10/30 06:53:13):
s/page/page_size/

bcwhite (2015/10/30 14:01:10):
Done.

    : shared_meta_(static_cast<SharedMetadata*>(base)),
      mem_base_(static_cast<char*>(base)),
      mem_size_(size),
      mem_page_(page ? page : size),
      last_seen_(0),
      corrupted_(false) {
  static_assert(sizeof(BlockHeader) % kAllocAlignment == 0,
                "BlockHeader is not a multiple of kAllocAlignment");
  static_assert(sizeof(SharedMetadata) % kAllocAlignment == 0,
                "SharedMetadata is not a multiple of kAllocAlignment");

  DCHECK(base && reinterpret_cast<uintptr_t>(base) % kAllocAlignment == 0);
  DCHECK(size >= 1 << 10 && size <= 1 << 20 &&  // 1 KiB <= size <= 1 MiB
         size % kAllocAlignment == 0);
  DCHECK(page >= 0 && (page == 0 || size % page == 0));

  if (shared_meta_->cookie != kGlobalCookie) {
    // This block is only executed when a completely new memory segment is
    // being initialized. It's unshared and single-threaded...
    const BlockHeader* first_block = reinterpret_cast<BlockHeader*>(
        mem_base_ + sizeof(SharedMetadata));
    if (shared_meta_->cookie != 0 ||

Alexander Potapenko (2015/10/30 06:53:13):
Can these checks be moved to the SharedMetadata co…

bcwhite (2015/10/30 14:01:10):
I don't think so because (1) it's never constructe…

        shared_meta_->size != 0 ||
        shared_meta_->version != 0 ||
        shared_meta_->freeptr != 0 ||

Alexander Potapenko (2015/10/30 06:53:13):
shared_meta_->freeptr is an atomic and should be a…

bcwhite (2015/10/30 14:01:10):
Right. It doesn't actually affect anything at thi…

        shared_meta_->corrupted != 0 ||
        shared_meta_->full != 0 ||
        shared_meta_->tailptr != 0 ||
        shared_meta_->queue.cookie != 0 ||
        shared_meta_->queue.next != 0 ||
        first_block->size != 0 ||
        first_block->cookie != 0 ||
        first_block->type != 0 ||
        first_block->next != 0) {
      // ...or something malicious has been playing with the metadata.
      SetCorrupted();
    }

    // This is still safe to do even if corruption has been detected.

Alexander Potapenko (2015/10/30 06:53:13):
Why bother if we've detected the corruption? Shoul…

bcwhite (2015/10/30 14:01:10):
It doesn't change anything either way. I opted fo…

    shared_meta_->cookie = kGlobalCookie;
    shared_meta_->size = size;
    shared_meta_->version = kGlobalVersion;
    subtle::NoBarrier_Store(&shared_meta_->freeptr, sizeof(SharedMetadata));

Alexander Potapenko (2015/10/30 06:53:13):
Shouldn't this be a Release_Store?
shared_meta_->f…

bcwhite (2015/10/30 14:01:10):
My understanding is that acquire/release affects o…


    // Set up the queue of iterable allocations.
    shared_meta_->queue.size = sizeof(BlockHeader);
    shared_meta_->queue.cookie = kBlockCookieQueue;
    subtle::NoBarrier_Store(&shared_meta_->queue.next, OFFSET_QUEUE);
    subtle::NoBarrier_Store(&shared_meta_->tailptr, OFFSET_QUEUE);
  }
}
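
A minimal construction sketch (not part of the CL): how a caller might place
the allocator over a mapped shared-memory segment. The base::SharedMemory
calls and the sizes here are assumptions, not taken from this review.

// Caller-side sketch. CreateAndMapAnonymous() zero-fills the segment, which
// is what the constructor above relies on to detect "new" memory.
base::SharedMemory shm;
if (shm.CreateAndMapAnonymous(64 << 10)) {          // 64 KiB segment
  base::SharedMemoryAllocator allocator(shm.memory(),
                                        64 << 10,   // total size
                                        4 << 10);   // 4 KiB pages
}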

SharedMemoryAllocator::~SharedMemoryAllocator() {
}

int32_t SharedMemoryAllocator::Allocate(int32_t size, int32_t type) {
  if (size < 0) {
    NOTREACHED();
    return OFFSET_NULL;
  }

  // Round up the requested size, plus header, to the next allocation
  // alignment.
  size += sizeof(BlockHeader);
  size = (size + (kAllocAlignment - 1)) & ~(kAllocAlignment - 1);
  if (size > mem_page_)
    return OFFSET_NULL;

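  // Worked example of the rounding above, using the 16-byte BlockHeader
  // defined earlier: a request for 20 bytes becomes 20 + 16 = 36 with the
  // header, and (36 + 15) & ~15 rounds that up to 48.
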
  // Allocation is lockless so we do all our calculation and then, if saving
  // indicates a change has occurred since we started, scrap everything and
  // start over.
  for (;;) {
    if (IsCorrupted())
      return OFFSET_NULL;

    int32_t freeptr = subtle::Acquire_Load(&shared_meta_->freeptr);
    if (freeptr + size > mem_size_) {
      shared_meta_->full = true;
      return OFFSET_NULL;
    }
    BlockHeader* block = GetBlock(freeptr, 0, 0, true);
    if (!block) {
      SetCorrupted();
      return OFFSET_NULL;
    }

    // An allocation cannot cross page boundaries. If it would, create a
    // "wasted" block and begin again at the top of the next page.
    int32_t page_free = mem_page_ - freeptr % mem_page_;
    if (size > page_free) {
      int32_t new_freeptr = freeptr + page_free;
      if (subtle::Release_CompareAndSwap(
              &shared_meta_->freeptr, freeptr, new_freeptr) == freeptr) {
        block->size = page_free;
        block->cookie = kBlockCookieWasted;
      }
      continue;
    }

    // Don't leave a slice at the end of a page too small for anything.
    if (page_free - size < (int)(sizeof(BlockHeader) + kAllocAlignment))
      size = page_free;

Alexander Potapenko (2015/10/30 06:53:13):
It may be worth noticing in the comments that the…

bcwhite (2015/10/30 14:01:10):
Done.


    int32_t new_freeptr = freeptr + size;
    if (new_freeptr > mem_size_) {
      SetCorrupted();
      return OFFSET_NULL;
    }

    if (subtle::Release_CompareAndSwap(
            &shared_meta_->freeptr, freeptr, new_freeptr) != freeptr) {
      // Another thread must have completed an allocation while we were
      // working. Try again.
      continue;
    }

    // Since allocating a block is atomic and all unallocated memory must be
    // zeros, any other value indicates that something has run amuck.

Alexander Potapenko (2015/10/30 06:53:13):
Can you please elaborate on how that's possible?

bcwhite (2015/10/30 14:01:10):
Done.

    if (block->size != 0 ||
        block->cookie != kBlockCookieFree ||
        block->type != 0 ||
        subtle::NoBarrier_Load(&block->next) != 0) {
      SetCorrupted();
      return OFFSET_NULL;
    }

    block->size = size;
    block->cookie = kBlockCookieAllocated;
    block->type = type;
    return freeptr;
  }
}

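A minimal usage sketch: allocating a block for a caller-defined record and
writing to it through GetBlockData() (defined later in this file). The
Sample struct and the 0x1234 type id are hypothetical.

// Any value agreed upon by all sharing processes works as a type id.
struct Sample {
  int32_t count;
  int32_t value;
};
int32_t offset = allocator.Allocate(sizeof(Sample), 0x1234);
if (offset != 0) {  // 0 (OFFSET_NULL) indicates failure
  Sample* sample = static_cast<Sample*>(
      allocator.GetBlockData(offset, 0x1234, sizeof(Sample), false));
  sample->count = 1;
  sample->value = 42;
  allocator.MakeIterable(offset);  // publish it to the iterable queue
}
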
void SharedMemoryAllocator::GetMemoryInfo(MemoryInfo* meminfo) {
  int32_t remaining =
      mem_size_ - subtle::NoBarrier_Load(&shared_meta_->freeptr);
  meminfo->total = mem_size_;
  meminfo->free = shared_meta_->corrupted ? 0 : remaining - sizeof(BlockHeader);

Alexander Potapenko (2015/10/30 06:53:13):
Shouldn't we use IsCorrupted() here?

bcwhite (2015/10/30 14:01:10):
Definitely.

}

void SharedMemoryAllocator::MakeIterable(int32_t offset) {
  if (IsCorrupted())
    return;
  BlockHeader* block = GetBlock(offset, 0, 0, false);
  if (!block)  // invalid offset
    return;
  if (subtle::NoBarrier_Load(&block->next) != 0)  // previously set iterable
    return;
  subtle::NoBarrier_Store(&block->next, OFFSET_QUEUE);  // will be tail block

  // Try to add this block to the tail of the queue. May take multiple tries.
  int32_t tail;
  for (;;) {
    tail = subtle::Acquire_Load(&shared_meta_->tailptr);
    block = GetBlock(tail, 0, 0, true);
    if (!block) {
      SetCorrupted();
      return;
    }
    int32_t next = subtle::NoBarrier_Load(&block->next);

    // Ensure that the tail pointer didn't change while reading next.

Alexander Potapenko (2015/10/30 06:53:13):
Why do you need that? What happens if the tail poi…

bcwhite (2015/10/30 14:01:10):
Only the read of the tail pointer is atomic but we…

    if (tail == subtle::Release_Load(&shared_meta_->tailptr)) {

Alexander Potapenko (2015/10/30 06:53:13):
Most certainly you do not want to use Release_Load…

bcwhite (2015/10/30 14:01:10):
Why? It ensures no previous memory access in the…

      // Check if the found block is truly the last in the queue (i.e. it
      // points back to the "queue" node).
      if (next == OFFSET_QUEUE) {
        // Yes. Try to append the passed block after the current tail block.
        if (subtle::Release_CompareAndSwap(
                &block->next, OFFSET_QUEUE, offset) == OFFSET_QUEUE) {

Alexander Potapenko (2015/10/30 06:53:13):
Please mind the indentation.

bcwhite (2015/10/30 14:01:10):
Isn't it supposed to be indented 4 spaces from the…

chrisha (2015/10/30 15:11:53):
git cl format will take care of these things for y…

bcwhite (2015/10/30 15:15:35):
I tried that. It made it worse (IMO) by splitting…

          // Success! The block is enqueued; need to update the tail pointer.
          break;
        }
      } else {
        // No. Another thread has stopped between the block-next update
        // and the tail-pointer update. Try to update tailptr past the
        // found block. That other thread may complete it first or it
        // may have crashed. Be fail-safe.
        subtle::Release_CompareAndSwap(&shared_meta_->tailptr, tail, next);
      }
    }
  }

  // Block has been enqueued. Now update the tail-pointer past it. This
  // could fail if another thread has already completed the operation as
  // part of being fail-safe.
  subtle::Release_CompareAndSwap(&shared_meta_->tailptr, tail, offset);
}

int32_t SharedMemoryAllocator::GetFirstIterable(Iterator* state,
                                                int32_t* type) {
  state->last = OFFSET_QUEUE;
  return GetNextIterable(state, type);
}

int32_t SharedMemoryAllocator::GetNextIterable(Iterator* state,
                                               int32_t* type) {
  const BlockHeader* block = GetBlock(state->last, 0, 0, true);
  if (!block)  // invalid iterator state
    return OFFSET_NULL;
  int32_t next = subtle::NoBarrier_Load(&block->next);
  block = GetBlock(next, 0, 0, false);
  if (!block)  // no next allocation in queue
    return OFFSET_NULL;

  state->last = next;
  *type = block->type;
  return next;
}

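A minimal iteration sketch: how a process attached to the same segment might
walk the queue of iterable blocks. The Iterator type is assumed from the
header file; 0x1234 matches the hypothetical type id used above.

SharedMemoryAllocator::Iterator iter;
int32_t type;
for (int32_t offset = allocator.GetFirstIterable(&iter, &type);
     offset != 0;
     offset = allocator.GetNextIterable(&iter, &type)) {
  if (type == 0x1234) {
    // A block of the expected type; read it via GetBlockData() as above.
  }
}
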
void SharedMemoryAllocator::SetCorrupted() {

Alexander Potapenko (2015/10/30 06:53:13):
Can SetCorrupted() and IsCorrupted() be called fro…

bcwhite (2015/10/30 14:01:10):
Certainly. See previous comment on this.

  LOG(ERROR) << "Corruption detected in shared-memory segment.";
  corrupted_ = true;
  shared_meta_->corrupted = true;
}

bool SharedMemoryAllocator::IsCorrupted() {
  if (corrupted_ || shared_meta_->corrupted) {
    SetCorrupted();  // Make sure all indicators are set.
    return true;
  }
  return false;
}

bool SharedMemoryAllocator::IsFull() {

Alexander Potapenko (2015/10/30 06:53:13):
How will IsFull() be used? Looks like a data race…

bcwhite (2015/10/30 14:01:10):
It's just informational to the caller. Data-race…

  return shared_meta_->full != 0;
}

SharedMemoryAllocator::BlockHeader* SharedMemoryAllocator::GetBlock(
    int32_t offset, int32_t type, int32_t size, bool special) {
  // Validation of parameters.
  if (offset % kAllocAlignment != 0)
    return nullptr;
  if (offset < (int)(special ? OFFSET_QUEUE : sizeof(SharedMetadata)))

Alexander Potapenko (2015/10/30 06:53:13):
It's unclear what 'special' stands for.

bcwhite (2015/10/30 14:01:10):
"special" indicates that we may try to access bloc…

    return nullptr;
  size += sizeof(BlockHeader);
  if (offset + size > mem_size_)
    return nullptr;
  int32_t freeptr = subtle::NoBarrier_Load(&shared_meta_->freeptr);
  if (offset + size > freeptr + (int)(special ? sizeof(BlockHeader) : 0))
    return nullptr;

  // Validation of referenced block-header.
  const BlockHeader* block = reinterpret_cast<BlockHeader*>(mem_base_ + offset);
  if (offset != freeptr && block->size < size)
    return nullptr;
  if (!special && block->cookie != kBlockCookieAllocated)
    return nullptr;
  if (type != 0 && block->type != type)
    return nullptr;

  // Return pointer to block data.
  return reinterpret_cast<BlockHeader*>(mem_base_ + offset);
}

void* SharedMemoryAllocator::GetBlockData(int32_t offset, int32_t type,
                                          int32_t size, bool special) {
  DCHECK(size > 0);
  BlockHeader* block = GetBlock(offset, type, size, special);
  if (!block)
    return nullptr;
  return reinterpret_cast<char*>(block) + sizeof(BlockHeader);
}

}  // namespace base