Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright (c) 2015 The Chromium Authors. All rights reserved. | |
| 2 // Use of this source code is governed by a BSD-style license that can be | |
| 3 // found in the LICENSE file. | |
| 4 | |
| 5 #include "base/memory/persistent_memory_allocator.h" | |
| 6 | |
| 7 #include <assert.h> | |
| 8 #include <algorithm> | |
| 9 | |
| 10 #include "base/logging.h" | |
| 11 #include "base/metrics/histogram_macros.h" | |
| 12 | |
| 13 namespace { | |
| 14 | |
| 15 // Required range of memory segment sizes. It has to fit in an unsigned 32-bit | |
| 16 // number and should be a power of 2 in order to accommodate almost any page | |
| 17 // size. | |
| 18 const uint32_t kSegmentMinSize = 1 << 10; // 1 KiB | |
| 19 const uint32_t kSegmentMaxSize = 1 << 30; // 1 GiB | |
| 20 | |
| 21 // All allocations and data-structures must be aligned to this byte boundary. | |
| 22 // Alignment as large as the physical bus between CPU and RAM is _required_ | |
| 23 // for some architectures, is simply more efficient on other CPUs, and | |
| 24 // generally a Good Idea(tm) for all platforms as it reduces/eliminates the | |
| 25 // chance that a type will span cache lines. Alignment mustn't be less | |
| 26 // than 8 to ensure proper alignment for all types. The rest is a balance | |
| 27 // between reducing spans across multiple cache lines and wasted space spent | |
| 28 // padding out allocations. An alignment of 16 would ensure that the block | |
| 29 // header structure always sits in a single cache line. An average of about | |
| 30 // 1/2 this value will be wasted with every allocation. | |
| 31 const uint32_t kAllocAlignment = 8; | |
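Editor's note: as a worked illustration of the alignment math that Allocate() applies below (the expression `(size + (kAllocAlignment - 1)) & ~(kAllocAlignment - 1)`), here is a minimal self-contained sketch. The 16-byte header size is an assumption based on the four 4-byte fields of the BlockHeader struct defined later in this file, not a value taken from the CL.

```cpp
#include <stdint.h>

namespace example {

constexpr uint32_t kAllocAlignment = 8;
constexpr uint32_t kBlockHeaderSize = 16;  // Assumed: 4 x 4-byte fields.

// Round a requested payload size, plus header, up to the allocation
// alignment, mirroring the computation at the top of Allocate().
constexpr uint32_t RoundedBlockSize(uint32_t req_size) {
  return (req_size + kBlockHeaderSize + (kAllocAlignment - 1)) &
         ~(kAllocAlignment - 1);
}

// A 20-byte request becomes a 40-byte block: 20 + 16 = 36, rounded up to 40.
static_assert(RoundedBlockSize(20) == 40, "rounding example");
// A request that already lands on the alignment boundary stays put: 24 + 16 = 40.
static_assert(RoundedBlockSize(24) == 40, "rounding example");

}  // namespace example
```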
| 32 | |
| 33 // A constant (random) value placed in the shared metadata to identify | |
| 34 // an already initialized memory segment. | |
| 35 const uint32_t kGlobalCookie = 0x408305DC; | |
| 36 | |
| 37 // The current version of the metadata. If updates are made that change | |
| 38 // the metadata, the version number can be queried to operate in a backward- | |
| 39 // compatible manner until the memory segment is completely re-initialized. | |
| 40 const uint32_t kGlobalVersion = 1; | |
| 41 | |
| 42 // Constant values placed in the block headers to indicate their state. | |
| 43 const uint32_t kBlockCookieFree = 0; | |
| 44 const uint32_t kBlockCookieQueue = 1; | |
| 45 const uint32_t kBlockCookieWasted = (uint32_t)-1; | |
| 46 const uint32_t kBlockCookieAllocated = 0xC8799269; | |
| 47 | |
| 48 // TODO(bcwhite): When acceptable, consider moving flags to std::atomic<char> | |
| 49 // types rather than a combined bitfield. | |
| 50 | |
| 51 // Flags stored in the flags_ field of the SharedMetaData structure below. | |
| 52 enum : int { | |
| 53 kFlagCorrupt = 1 << 0, | |
| 54 kFlagFull = 1 << 1 | |
| 55 }; | |
| 56 | |
| 57 bool CheckFlag(volatile std::atomic<uint32_t>* flags, int flag) { | |
| 58 uint32_t loaded_flags = flags->load(); | |
| 59 return (loaded_flags & flag) != 0; | |
| 60 } | |
| 61 | |
| 62 void SetFlag(volatile std::atomic<uint32_t>* flags, int flag) { | |
| 63 uint32_t loaded_flags = flags->load(); | |
| 64 for (;;) { | |
| 65 uint32_t new_flags = (loaded_flags & ~flag) | flag; | |
| 66 // In the failure case, the actual "flags" value is stored in loaded_flags. | |
| 67 if (flags->compare_exchange_weak(loaded_flags, new_flags)) | |
| 68 break; | |
| 69 } | |
| 70 } | |
| 71 | |
| 72 } // namespace | |
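Editor's note on the SetFlag() helper just above: because the loop only ever ORs bits in (the `& ~flag` is a no-op given the subsequent `| flag`), the same effect could be obtained with a single fetch_or, which never needs to retry. This is only a sketch of the equivalent alternative for comparison, not a change proposed by this CL:

```cpp
#include <stdint.h>
#include <atomic>

// Equivalent to the CAS loop in SetFlag(): atomically OR the flag bit into
// the shared bitfield. The default (sequentially consistent) ordering matches
// the defaults used by the loop above.
void SetFlagWithFetchOr(volatile std::atomic<uint32_t>* flags, int flag) {
  flags->fetch_or(static_cast<uint32_t>(flag));
}
```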
| 73 | |
| 74 namespace base { | |
| 75 | |
| 76 // The block-header is placed at the top of every allocation within the | |
| 77 // segment to describe the data that follows it. | |
| 78 struct PersistentMemoryAllocator::BlockHeader { | |
| 79 uint32_t size; // Number of bytes in this block, including header. | |
| 80 uint32_t cookie; // Constant value indicating completed allocation. | |
| 81 uint32_t type_id; // A number provided by caller indicating data type. | |
| 82 std::atomic<uint32_t> next; // Pointer to the next block when iterating. | |
| 83 }; | |
| 84 | |
| 85 // The shared metadata exists once at the top of the memory segment to | |
| 86 // describe the state of the allocator to all processes. | |
| 87 struct PersistentMemoryAllocator::SharedMetadata { | |
| 88 uint32_t cookie; // Some value that indicates complete initialization. | |
| 89 uint32_t size; // Total size of memory segment. | |
| 90 uint32_t page_size; // Paging size within memory segment. | |
| 91 uint32_t version; // Version code so upgrades don't break. | |
| 92 std::atomic<uint32_t> freeptr; // Offset/ref to first free space in segment. | |
| 93 std::atomic<uint32_t> flags; // Bitfield of information flags. | |
| 94 uint32_t name; // Reference to stored name string. | |
| 95 | |
| 96 // The "iterable" queue is an M&S Queue as described here, append-only: | |
| 97 // https://www.research.ibm.com/people/m/michael/podc-1996.pdf | |
| 98 std::atomic<uint32_t> tailptr; // Last block available for iteration. | |
| 99 BlockHeader queue; // Empty block for linked-list head/tail. (must be last) | |
| 100 }; | |
| 101 | |
| 102 // The "queue" block header is used to detect "last node" so that zero/null | |
| 103 // can be used to indicate that it hasn't been added at all. It is part of | |
| 104 // the SharedMetadata structure which itself is always located at offset zero. | |
| 105 const PersistentMemoryAllocator::Reference | |
| 106 PersistentMemoryAllocator::kReferenceQueue = | |
| 107 offsetof(SharedMetadata, queue); | |
| 108 const PersistentMemoryAllocator::Reference | |
| 109 PersistentMemoryAllocator::kReferenceNull = 0; | |
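Editor's note for readers of the review: a Reference is simply a byte offset from the start of the memory segment, with 0 (kReferenceNull) reserved as "null" and offsetof(SharedMetadata, queue) (kReferenceQueue) reserved for the head of the iteration queue. Below is a hypothetical standalone sketch of the offset-to-pointer conversion that GetAsObject<T>() performs once GetBlock() has validated the reference; ResolveReference and the header_size parameter are illustrative names, not part of the class.

```cpp
#include <stdint.h>

// Hypothetical helper mirroring the final step of GetAsObject<T>(ref, type):
// skip the block header and reinterpret the payload. The real method also
// validates size, cookie and type_id via GetBlock() before doing this.
template <typename T>
T* ResolveReference(char* mem_base, uint32_t ref, uint32_t header_size) {
  if (ref == 0)  // kReferenceNull: not a valid allocation.
    return nullptr;
  return reinterpret_cast<T*>(mem_base + ref + header_size);
}
```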
| 110 | |
| 111 PersistentMemoryAllocator::PersistentMemoryAllocator(void* base, | |
| 112 size_t size, | |
| 113 size_t page_size, | |
| 114 const std::string& name) | |
| 115 : mem_base_(static_cast<char*>(base)), | |
| 116 mem_size_(static_cast<uint32_t>(size)), | |
| 117 mem_page_(static_cast<uint32_t>((page_size ? page_size : size))), | |
| 118 corrupt_(0), | |
| 119 allocs_histogram_(nullptr), | |
| 120 used_histogram_(nullptr) { | |
| 121 static_assert(sizeof(BlockHeader) % kAllocAlignment == 0, | |
| 122 "BlockHeader is not a multiple of kAllocAlignment"); | |
| 123 static_assert(sizeof(SharedMetadata) % kAllocAlignment == 0, | |
| 124 "SharedMetadata is not a multiple of kAllocAlignment"); | |
| 125 | |
| 126 CHECK(base && reinterpret_cast<uintptr_t>(base) % kAllocAlignment == 0); | |
| 127 CHECK(size >= kSegmentMinSize && size <= kSegmentMaxSize && | |
| 128 size % kAllocAlignment == 0); | |
| 129 CHECK(page_size == 0 || size % page_size == 0); | |
| 130 | |
| 131 // These atomics operate inter-process and so must be lock-free. The local | |
| 132 // casts are to make sure it can be evaluated at compile time to a constant. | |
| 133 CHECK(((SharedMetadata*)0)->freeptr.is_lock_free()); | |
| 134 CHECK(((SharedMetadata*)0)->flags.is_lock_free()); | |
| 135 CHECK(((BlockHeader*)0)->next.is_lock_free()); | |
| 136 CHECK(corrupt_.is_lock_free()); // Inter-process so must be lock-free. | |
| 137 | |
| 138 if (shared_meta()->cookie != kGlobalCookie) { | |
| 139 // This block is only executed when a completely new memory segment is | |
| 140 // being initialized. It's unshared and single-threaded... | |
| 141 volatile BlockHeader* const first_block = | |
| 142 reinterpret_cast<volatile BlockHeader*>(mem_base_ + | |
| 143 sizeof(SharedMetadata)); | |
| 144 if (shared_meta()->cookie != 0 || | |
| 145 shared_meta()->size != 0 || | |
| 146 shared_meta()->version != 0 || | |
| 147 shared_meta()->freeptr.load() != 0 || | |
| 148 shared_meta()->flags.load() != 0 || | |
| 149 shared_meta()->name != 0 || | |
| 150 shared_meta()->tailptr != 0 || | |
| 151 shared_meta()->queue.cookie != 0 || | |
| 152 shared_meta()->queue.next.load() != 0 || | |
| 153 first_block->size != 0 || | |
| 154 first_block->cookie != 0 || | |
| 155 first_block->type_id != 0 || | |
| 156 first_block->next != 0) { | |
| 157 // ...or something malicious has been playing with the metadata. | |
| 158 NOTREACHED(); | |
| 159 SetCorrupt(); | |
| 160 } | |
| 161 | |
| 162 // This is still safe to do even if corruption has been detected. | |
| 163 shared_meta()->cookie = kGlobalCookie; | |
| 164 shared_meta()->size = mem_size_; | |
| 165 shared_meta()->page_size = mem_page_; | |
| 166 shared_meta()->version = kGlobalVersion; | |
| 167 shared_meta()->freeptr.store(sizeof(SharedMetadata)); | |
| 168 | |
| 169 // Set up the queue of iterable allocations. | |
| 170 shared_meta()->queue.size = sizeof(BlockHeader); | |
| 171 shared_meta()->queue.cookie = kBlockCookieQueue; | |
| 172 shared_meta()->queue.next.store(kReferenceQueue); | |
| 173 shared_meta()->tailptr.store(kReferenceQueue); | |
| 174 | |
| 175 // Allocate space for the name so other processes can learn it. | |
| 176 if (!name.empty()) { | |
| 177 const size_t name_length = name.length() + 1; | |
| 178 shared_meta()->name = Allocate(name_length, 0); | |
| 179 char* name_cstr = GetAsObject<char>(shared_meta()->name, 0); | |
| 180 if (name_cstr) | |
| 181 strcpy(name_cstr, name.c_str()); | |
| 182 } | |
| 183 } else { | |
| 184 // The allocator is attaching to a previously initialized segment of | |
| 185 // memory. Make sure the embedded data matches what has been passed. | |
| 186 if (shared_meta()->size != mem_size_ || | |
| 187 shared_meta()->page_size != mem_page_) { | |
| 188 NOTREACHED(); | |
| 189 SetCorrupt(); | |
| 190 } | |
| 191 } | |
| 192 | |
| 193 // Metrics are created here so there is no recursion from Allocate | |
| 194 // trying to update a histogram that needs to be created, which in turn | |
| 195 // calls Allocate again. | |
| 196 // Some metrics are only active on the primary owner. | |
| 197 if (!name.empty()) { | |
| 198 used_histogram_ = Histogram::FactoryGet( | |
| 199 name + ".UsedKiB", 1, 256 << 10, 100, HistogramBase::kNoFlags); | |
| 200 } | |
| 201 | |
| 202 // Other metrics are active on all users of the memory segment. | |
| 203 Reference name_ref = shared_meta()->name; | |
| 204 char* name_cstr = GetAsObject<char>(name_ref, 0); | |
| 205 if (name_cstr) { | |
| 206 size_t name_length = GetAllocSize(name_ref); | |
| 207 while (name_length > 0 && name_cstr[name_length - 1] == '\0') | |
| 208 --name_length; | |
| 209 if (name_length > 0) { | |
| 210 std::string shared_name(name_cstr, name_length); | |
| 211 allocs_histogram_ = Histogram::FactoryGet( | |
| 212 shared_name + ".Allocs", 1, 10000, 50, HistogramBase::kNoFlags); | |
| 213 } | |
| 214 } | |
| 215 } | |
| 216 | |
| 217 PersistentMemoryAllocator::~PersistentMemoryAllocator() { | |
| 218 // It's strictly forbidden to do any memory access here in case there is | |
| 219 // some issue with the underlying memory segment. The "Local" allocator | |
| 220 // makes use of this to allow deletion of the segment on the heap from | |
| 221 // within its destructor. | |
| 222 } | |
| 223 | |
| 224 size_t PersistentMemoryAllocator::GetAllocSize(Reference ref) { | |
| 225 volatile BlockHeader* const block = GetBlock(ref, 0, 0, false, false); | |
| 226 if (!block) | |
| 227 return 0; | |
| 228 uint32_t size = block->size; | |
| 229 // Header was verified by GetBlock() but a malicious actor could change | |
| 230 // the value between there and here. Check it again. | |
| 231 if (size <= sizeof(BlockHeader) || ref + size >= mem_size_) | |
| 232 return 0; | |
| 233 return size - sizeof(BlockHeader); | |
| 234 } | |
| 235 | |
| 236 uint32_t PersistentMemoryAllocator::GetType(Reference ref) { | |
| 237 volatile BlockHeader* const block = GetBlock(ref, 0, 0, false, false); | |
| 238 if (!block) | |
| 239 return 0; | |
| 240 return block->type_id; | |
| 241 } | |
| 242 | |
| 243 void PersistentMemoryAllocator::SetType(Reference ref, uint32_t type_id) { | |
| 244 volatile BlockHeader* const block = GetBlock(ref, 0, 0, false, false); | |
| 245 if (!block) | |
| 246 return; | |
| 247 block->type_id = type_id; | |
| 248 } | |
| 249 | |
| 250 PersistentMemoryAllocator::Reference PersistentMemoryAllocator::Allocate( | |
| 251 size_t req_size, | |
| 252 uint32_t type_id) { | |
| 253 // Validate req_size to ensure it won't overflow when used as 32-bit value. | |
| 254 if (req_size > kSegmentMaxSize - sizeof(BlockHeader)) { | |
| 255 NOTREACHED(); | |
| 256 return kReferenceNull; | |
| 257 } | |
| 258 | |
| 259 // Round up the requested size, plus header, to the next allocation alignment. | |
| 260 uint32_t size = static_cast<uint32_t>(req_size + sizeof(BlockHeader)); | |
| 261 size = (size + (kAllocAlignment - 1)) & ~(kAllocAlignment - 1); | |
| 262 if (size <= sizeof(BlockHeader) || size > mem_page_) { | |
| 263 NOTREACHED(); | |
| 264 return kReferenceNull; | |
| 265 } | |
| 266 | |
| 267 // Get the current start of unallocated memory. Other threads may | |
| 268 // update this at any time and cause us to retry these operations. | |
| 269 // This value should be treated as "const" to avoid confusion through | |
| 270 // the code below but recognize that any failed compare-exchange operation | |
| 271 // involving it will cause it to be loaded with a more recent value. The | |
| 272 // code should either exit or restart the loop in that case. | |
| 273 /* const */ uint32_t freeptr = shared_meta()->freeptr.load(); | |
| 274 | |
| 275 // Allocation is lockless so we do all our calculation and then, if saving | |
| 276 // indicates a change has occurred since we started, scrap everything and | |
| 277 // start over. | |
| 278 for (;;) { | |
| 279 if (IsCorrupt()) | |
| 280 return kReferenceNull; | |
| 281 | |
| 282 if (freeptr + size > mem_size_) { | |
| 283 SetFlag(&shared_meta()->flags, kFlagFull); | |
| 284 return kReferenceNull; | |
| 285 } | |
| 286 | |
| 287 // Get pointer to the "free" block. It doesn't even have a header; pass | |
| 288 // -sizeof(header) so accounting for that will yield an expected size of | |
Dmitry Vyukov
2015/12/03 20:51:37
We don't pass -sizeof(header) anymore.
bcwhite
2015/12/03 21:53:41
Heh. The fun of documentation. Fixed.
| 289 // zero which is what will be stored at that location. If something | |
| 290 // has been allocated since the load of freeptr above, it is still safe | |
| 291 // as nothing will be written to that location until after the CAS below. | |
| 292 volatile BlockHeader* const block = GetBlock(freeptr, 0, 0, false, true); | |
| 293 if (!block) { | |
| 294 SetCorrupt(); | |
| 295 return kReferenceNull; | |
| 296 } | |
| 297 | |
| 298 // An allocation cannot cross page boundaries. If it would, create a | |
| 299 // "wasted" block and begin again at the top of the next page. This | |
| 300 // area could just be left empty but we fill in the block header just | |
| 301 // for completeness' sake. | |
| 302 const uint32_t page_free = mem_page_ - freeptr % mem_page_; | |
| 303 if (size > page_free) { | |
| 304 if (page_free <= sizeof(BlockHeader)) { | |
| 305 SetCorrupt(); | |
| 306 return kReferenceNull; | |
| 307 } | |
| 308 const uint32_t new_freeptr = freeptr + page_free; | |
| 309 if (shared_meta()->freeptr.compare_exchange_strong(freeptr, | |
| 310 new_freeptr)) { | |
| 311 block->size = page_free; | |
| 312 block->cookie = kBlockCookieWasted; | |
| 313 } | |
| 314 continue; | |
| 315 } | |
| 316 | |
| 317 // Don't leave a slice at the end of a page too small for anything. This | |
| 318 // can result in an allocation up to two alignment-sizes greater than the | |
| 319 // minimum required by requested-size + header + alignment. | |
| 320 if (page_free - size < sizeof(BlockHeader) + kAllocAlignment) | |
| 321 size = page_free; | |
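Editor's note: a small worked example of the two page-boundary rules above (the "wasted block" case and the "don't leave a tiny slice" case), under the same assumed 16-byte header as earlier; the numbers are chosen purely for illustration.

```cpp
#include <stdint.h>

namespace example {

constexpr uint32_t kPage = 4096;  // Example mem_page_.
constexpr uint32_t kHeader = 16;  // Assumed sizeof(BlockHeader).
constexpr uint32_t kAlign = 8;    // kAllocAlignment.

// Case 1: freeptr = 4072, rounded block size = 40. Only 24 bytes remain in
// the page, so a 24-byte "wasted" block is written and the loop retries at
// offset 4096, the top of the next page.
static_assert(kPage - 4072 % kPage == 24, "page_free for case 1");
static_assert(40 > 24, "block does not fit; wasted block is created");

// Case 2: freeptr = 4048, rounded block size = 40. 48 bytes remain; the
// 8-byte tail that would be left over is smaller than kHeader + kAlign (24),
// so the allocation is widened to 48 bytes to consume the rest of the page.
static_assert(kPage - 4048 % kPage == 48, "page_free for case 2");
static_assert(48 - 40 < kHeader + kAlign, "slice too small; size = page_free");

}  // namespace example
```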
| 322 | |
| 323 const uint32_t new_freeptr = freeptr + size; | |
| 324 if (new_freeptr > mem_size_) { | |
| 325 SetCorrupt(); | |
| 326 return kReferenceNull; | |
| 327 } | |
| 328 | |
| 329 // Save our work. Try again if another thread has completed an allocation | |
| 330 // while we were processing. A "weak" exchange would be permissible here | |
| 331 // because the code will just loop and try again but the above processing | |
| 332 // is significant so make the extra effort of a "strong" exchange. | |
| 333 if (!shared_meta()->freeptr.compare_exchange_strong(freeptr, new_freeptr)) | |
| 334 continue; | |
| 335 | |
| 336 // Record this allocation in usage stats (if active). This is safe | |
| 337 // to call at this point because the allocation is complete. | |
| 338 if (allocs_histogram_) | |
| 339 allocs_histogram_->Add(static_cast<HistogramBase::Sample>(req_size)); | |
| 340 | |
| 341 // Given that all memory was zeroed before ever being given to an instance | |
| 342 // of this class and given that we only allocate in a monotonic fashion | |
| 343 // going forward, it must be that the newly allocated block is completely | |
| 344 // full of zeros. If we find anything in the block header that is NOT a | |
| 345 // zero then something must have previously run amok through memory, | |
| 346 // writing beyond the allocated space and into unallocated space. | |
| 347 if (block->size != 0 || | |
| 348 block->cookie != kBlockCookieFree || | |
| 349 block->type_id != 0 || | |
| 350 block->next.load() != 0) { | |
| 351 SetCorrupt(); | |
| 352 return kReferenceNull; | |
| 353 } | |
| 354 | |
| 355 block->size = size; | |
| 356 block->cookie = kBlockCookieAllocated; | |
| 357 block->type_id = type_id; | |
| 358 return freeptr; | |
| 359 } | |
| 360 } | |
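Editor's note: to make the allocation path above concrete, here is a minimal caller-side sketch combining Allocate(), GetAsObject<>() and MakeIterable() as they are used elsewhere in this CL. MyRecord, kMyTypeId and StoreRecord are hypothetical names for illustration only.

```cpp
#include <stdint.h>

#include "base/memory/persistent_memory_allocator.h"

// Hypothetical record type and caller-chosen type id.
struct MyRecord {
  uint32_t counter;
  uint32_t state;
};
const uint32_t kMyTypeId = 0xABCD0001;

void StoreRecord(base::PersistentMemoryAllocator* allocator) {
  base::PersistentMemoryAllocator::Reference ref =
      allocator->Allocate(sizeof(MyRecord), kMyTypeId);
  if (!ref)  // kReferenceNull: the segment is full or corrupt.
    return;
  MyRecord* record = allocator->GetAsObject<MyRecord>(ref, kMyTypeId);
  if (!record)
    return;
  record->counter = 1;
  record->state = 0;
  // Publish the block so other users of the segment can find it by iteration.
  allocator->MakeIterable(ref);
}
```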
| 361 | |
| 362 void PersistentMemoryAllocator::GetMemoryInfo(MemoryInfo* meminfo) { | |
| 363 uint32_t remaining = mem_size_ - shared_meta()->freeptr.load(); | |
| 364 meminfo->total = mem_size_; | |
| 365 meminfo->free = IsCorrupt() ? 0 : remaining - sizeof(BlockHeader); | |
| 366 } | |
| 367 | |
| 368 void PersistentMemoryAllocator::MakeIterable(Reference ref) { | |
| 369 if (IsCorrupt()) | |
| 370 return; | |
| 371 volatile BlockHeader* block = GetBlock(ref, 0, 0, false, false); | |
| 372 if (!block) // invalid reference | |
| 373 return; | |
| 374 if (block->next.load(std::memory_order_acquire) != 0) // Already iterable. | |
| 375 return; | |
| 376 block->next.store(kReferenceQueue, std::memory_order_release); // New tail. | |
| 377 | |
| 378 // Try to add this block to the tail of the queue. May take multiple tries. | |
| 379 // If so, tail will be automatically updated with a more recent value during | |
| 380 // compare-exchange operations. | |
| 381 uint32_t tail = shared_meta()->tailptr.load(std::memory_order_acquire); | |
| 382 for (;;) { | |
| 383 // Acquire the current tail-pointer released by previous call to this | |
| 384 // method and validate it. | |
| 385 block = GetBlock(tail, 0, 0, true, false); | |
| 386 if (!block) { | |
| 387 SetCorrupt(); | |
| 388 return; | |
| 389 } | |
| 390 | |
| 391 // Try to insert the block at the tail of the queue. The tail node always | |
| 392 // has an existing value of kReferenceQueue; if that is somehow not the | |
| 393 // existing value then another thread has acted in the meantime. | |
| 394 uint32_t next = kReferenceQueue; // Will get replaced with existing value. | |
| 395 if (block->next.compare_exchange_strong(next, ref, | |
Dmitry Vyukov
2015/12/03 20:51:37
Since this file is so rich with comments and you h
bcwhite
2015/12/03 21:53:41
Done.
| 396 std::memory_order_release, | |
| 397 std::memory_order_acquire)) { | |
| 398 // Update the tail pointer to the new offset. If the "else" clause did | |
| 399 // not exist, then this could be a simple Release_Store to set the new | |
| 400 // value but because it does, it's possible that other threads could add | |
| 401 // one or more nodes at the tail before reaching this point. We don't | |
| 402 // have to check the return value because it either operates correctly | |
| 403 // or the exact same operation has already been done (by the "else" | |
| 404 // clause) on some other thread. | |
| 405 shared_meta()->tailptr.compare_exchange_strong(tail, ref, | |
| 406 std::memory_order_release, | |
| 407 std::memory_order_relaxed); | |
| 408 return; | |
| 409 } else { | |
| 410 // In the unlikely case that a thread crashed or was killed between the | |
| 411 // update of "next" and the update of "tailptr", it is necessary to | |
| 412 // perform the operation that would have been done. There's no explicit | |
| 413 // check for crash/kill which means that this operation may also happen | |
| 414 // even when the other thread is in perfect working order which is what | |
| 415 // necessitates the CompareAndSwap above. | |
| 416 shared_meta()->tailptr.compare_exchange_strong(tail, next, | |
| 417 std::memory_order_release, | |
| 418 std::memory_order_acquire); | |
Dmitry Vyukov
2015/12/03 20:51:37
FWIW, the holy standard says:
20.7.2.5
template
bcwhite
2015/12/03 21:53:41
Done. Of note, the standard also says...
http://e
| 419 } | |
| 420 } | |
| 421 } | |
| 422 | |
| 423 void PersistentMemoryAllocator::CreateIterator(Iterator* state) { | |
| 424 state->last = kReferenceQueue; | |
| 425 state->niter = 0; | |
| 426 } | |
| 427 | |
| 428 PersistentMemoryAllocator::Reference PersistentMemoryAllocator::GetNextIterable( | |
| 429 Iterator* state, | |
| 430 uint32_t* type_id) { | |
| 431 volatile BlockHeader* block = GetBlock(state->last, 0, 0, true, false); | |
| 432 if (!block) // invalid iterator state | |
| 433 return kReferenceNull; | |
| 434 | |
| 435 // The compiler and CPU can freely reorder all memory accesses on which | |
| 436 // there are no dependencies. It could, for example, move the load of | |
| 437 // "freeptr" above this point because there are no explicit dependencies | |
| 438 // between it and "next". If it did, however, then another block could | |
| 439 // be queued after that but before the following load meaning there is | |
| 440 // one more queued block than the future "detect loop by having more | |
| 441 // blocks that could fit before freeptr" will allow. | |
| 442 // | |
| 443 // By "acquiring" the "next" value here, it's synchronized to the enqueue | |
| 444 // of the node which in turn is synchronized to the allocation (which sets | |
| 445 // freeptr). Thus, the scenario above cannot happen. | |
| 446 uint32_t next = block->next.load(std::memory_order_acquire); | |
| 447 block = GetBlock(next, 0, 0, false, false); | |
| 448 if (!block) // no next allocation in queue | |
| 449 return kReferenceNull; | |
| 450 | |
| 451 // Memory corruption could cause a loop in the list. We need to detect | |
| 452 // that so as to not cause an infinite loop in the caller. We do this | |
| 453 // simply by making sure we don't iterate more than the absolute maximum | |
| 454 // number of allocations that could have been made. Callers are likely | |
| 455 // to loop multiple times before it is detected but at least it stops. | |
| 456 uint32_t freeptr = std::min( | |
| 457 shared_meta()->freeptr.load(std::memory_order_acquire), | |
| 458 mem_size_); | |
| 459 if (state->niter > freeptr / (sizeof(BlockHeader) + kAllocAlignment)) { | |
| 460 SetCorrupt(); | |
| 461 return kReferenceNull; | |
| 462 } | |
| 463 | |
| 464 state->last = next; | |
| 465 state->niter++; | |
| 466 *type_id = block->type_id; | |
| 467 | |
| 468 return next; | |
| 469 } | |
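Editor's note: the matching consumer side of the earlier sketch, walking the iterable queue with CreateIterator()/GetNextIterable(). It reuses the hypothetical MyRecord/kMyTypeId names (and the same includes) and could run in any process attached to the same segment.

```cpp
void ReadRecords(base::PersistentMemoryAllocator* allocator) {
  base::PersistentMemoryAllocator::Iterator iter;
  allocator->CreateIterator(&iter);

  uint32_t type_id = 0;
  base::PersistentMemoryAllocator::Reference ref;
  while ((ref = allocator->GetNextIterable(&iter, &type_id)) != 0) {
    if (type_id != kMyTypeId)  // Skip blocks published with other type ids.
      continue;
    const MyRecord* record = allocator->GetAsObject<MyRecord>(ref, kMyTypeId);
    if (record) {
      // Use record->counter / record->state here.
    }
  }
}
```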
| 470 | |
| 471 // The "corrupted" state is held both locally and globally (shared). The | |
| 472 // shared flag can't be trusted since a malicious actor could overwrite it. | |
| 473 // The local version is immune to foreign actors. Thus, if seen shared, | |
| 474 // copy it locally and, once known, always restore it globally. | |
| 475 void PersistentMemoryAllocator::SetCorrupt() { | |
| 476 LOG(ERROR) << "Corruption detected in shared-memory segment."; | |
| 477 corrupt_.store(true); | |
| 478 SetFlag(&shared_meta()->flags, kFlagCorrupt); | |
| 479 } | |
| 480 | |
| 481 bool PersistentMemoryAllocator::IsCorrupt() { | |
| 482 if (corrupt_.load() || CheckFlag(&shared_meta()->flags, kFlagCorrupt)) { | |
| 483 SetCorrupt(); // Make sure all indicators are set. | |
| 484 return true; | |
| 485 } | |
| 486 return false; | |
| 487 } | |
| 488 | |
| 489 bool PersistentMemoryAllocator::IsFull() { | |
| 490 return CheckFlag(&shared_meta()->flags, kFlagFull); | |
| 491 } | |
| 492 | |
| 493 // Dereference a block |ref| and ensure that it's valid for the desired | |
| 494 // |type_id| and |size|. |queue_ok| and |free_ok| allow access to block | |
| 495 // headers not available to callers but still accessed by this module. By | |
| 496 // having internal dereferences go through this same function, the allocator | |
| 497 // is hardened against corruption. | |
| 498 volatile PersistentMemoryAllocator::BlockHeader* | |
| 499 PersistentMemoryAllocator::GetBlock(Reference ref, uint32_t type_id, | |
| 500 uint32_t size, bool queue_ok, | |
| 501 bool free_ok) { | |
| 502 // Validation of parameters. | |
| 503 if (ref % kAllocAlignment != 0) | |
| 504 return nullptr; | |
| 505 if (ref < (queue_ok ? kReferenceQueue : sizeof(SharedMetadata))) | |
| 506 return nullptr; | |
| 507 size += sizeof(BlockHeader); | |
| 508 if (ref + size > mem_size_) | |
| 509 return nullptr; | |
| 510 | |
| 511 // Validation of referenced block-header. | |
| 512 if (!free_ok) { | |
| 513 uint32_t freeptr = shared_meta()->freeptr.load(); | |
| 514 if (ref + size > freeptr) | |
| 515 return nullptr; | |
| 516 volatile BlockHeader* const block = | |
| 517 reinterpret_cast<volatile BlockHeader*>(mem_base_ + ref); | |
| 518 if (block->size < size) | |
| 519 return nullptr; | |
| 520 if (ref != kReferenceQueue && block->cookie != kBlockCookieAllocated) | |
| 521 return nullptr; | |
| 522 if (type_id != 0 && block->type_id != type_id) | |
| 523 return nullptr; | |
| 524 } | |
| 525 | |
| 526 // Return pointer to the validated block header. | |
| 527 return reinterpret_cast<volatile BlockHeader*>(mem_base_ + ref); | |
| 528 } | |
| 529 | |
| 530 volatile void* PersistentMemoryAllocator::GetBlockData(Reference ref, | |
| 531 uint32_t type_id, | |
| 532 uint32_t size) { | |
| 533 DCHECK(size > 0); | |
| 534 volatile BlockHeader* block = GetBlock(ref, type_id, size, false, false); | |
| 535 if (!block) | |
| 536 return nullptr; | |
| 537 return reinterpret_cast<volatile char*>(block) + sizeof(BlockHeader); | |
| 538 } | |
| 539 | |
| 540 void PersistentMemoryAllocator::UpdateStaticHistograms() { | |
| 541 if (used_histogram_) { | |
| 542 MemoryInfo meminfo; | |
| 543 GetMemoryInfo(&meminfo); | |
| 544 HistogramBase::Sample usedkb = static_cast<HistogramBase::Sample>( | |
| 545 (meminfo.total - meminfo.free) >> 10); | |
| 546 used_histogram_->Add(usedkb); | |
| 547 } | |
| 548 } | |
| 549 | |
| 550 //----- LocalPersistentMemoryAllocator ----------------------------------------- | |
| 551 | |
| 552 LocalPersistentMemoryAllocator::LocalPersistentMemoryAllocator( | |
| 553 size_t size, | |
| 554 const std::string& name) | |
| 555 : PersistentMemoryAllocator(memset(new char[size], 0, size), | |
| 556 size, 0, name) {} | |
| 557 | |
| 558 LocalPersistentMemoryAllocator::~LocalPersistentMemoryAllocator() { | |
| 559 delete[] mem_base_;  // Allocated with new char[] in the constructor. | |
| 560 } | |
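Editor's note: tying the two usage sketches together, a hypothetical single-process smoke test built on the heap-backed allocator defined just above; the 64 KiB size and the "SmokeTest" name are arbitrary illustrative choices.

```cpp
void SmokeTest() {
  // 64 KiB heap-backed segment; passing page_size 0 to the base class means
  // the whole segment is treated as a single page.
  base::LocalPersistentMemoryAllocator allocator(64 << 10, "SmokeTest");
  StoreRecord(&allocator);   // Hypothetical producer from the earlier sketch.
  ReadRecords(&allocator);   // Hypothetical consumer from the earlier sketch.
}
```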
| 561 | |
| 562 } // namespace base | |