OLD | NEW |
(Empty) | |
| 1 // Copyright (c) 2015 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. |
| 4 |
| 5 #include "base/metrics/persistent_memory_allocator.h" |
| 6 |
| 7 #include <assert.h> |
| 8 #include <algorithm> |
| 9 |
| 10 #include "base/files/memory_mapped_file.h" |
| 11 #include "base/logging.h" |
| 12 #include "base/metrics/histogram_macros.h" |
| 13 |
| 14 namespace { |
| 15 |
| 16 // Required range of memory segment sizes. It has to fit in an unsigned 32-bit |
| 17 // number and should be a power of 2 in order to accommodate almost any page
| 18 // size. |
| 19 const uint32_t kSegmentMinSize = 1 << 10; // 1 KiB |
| 20 const uint32_t kSegmentMaxSize = 1 << 30; // 1 GiB |
| 21 |
| 22 // A constant (random) value placed in the shared metadata to identify |
| 23 // an already initialized memory segment. |
| 24 const uint32_t kGlobalCookie = 0x408305DC; |
| 25 |
| 26 // The current version of the metadata. If updates are made that change |
| 27 // the metadata, the version number can be queried to operate in a backward- |
| 28 // compatible manner until the memory segment is completely re-initialized.
| 29 const uint32_t kGlobalVersion = 1; |
| 30 |
| 31 // Constant values placed in the block headers to indicate a block's state.
| 32 const uint32_t kBlockCookieFree = 0; |
| 33 const uint32_t kBlockCookieQueue = 1; |
| 34 const uint32_t kBlockCookieWasted = (uint32_t)-1; |
| 35 const uint32_t kBlockCookieAllocated = 0xC8799269; |
| 36 |
| 37 // TODO(bcwhite): When acceptable, consider moving flags to std::atomic<char> |
| 38 // types rather than a combined bitfield.
| 39 |
| 40 // Flags stored in the flags_ field of the SharedMetaData structure below. |
| 41 enum : int { |
| 42 kFlagCorrupt = 1 << 0, |
| 43 kFlagFull = 1 << 1 |
| 44 }; |
| 45 |
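| // Returns true if |flag| is set in the bitfield pointed to by |flags|.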
| 46 bool CheckFlag(const volatile std::atomic<uint32_t>* flags, int flag) { |
| 47 uint32_t loaded_flags = flags->load(); |
| 48 return (loaded_flags & flag) != 0; |
| 49 } |
| 50 |
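| // Atomically sets |flag| in |flags|, retrying the compare-exchange until it
| // succeeds so that concurrent updates from other threads are not lost.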
| 51 void SetFlag(volatile std::atomic<uint32_t>* flags, int flag) { |
| 52 uint32_t loaded_flags = flags->load(); |
| 53 for (;;) { |
| 54 uint32_t new_flags = (loaded_flags & ~flag) | flag; |
| 55 // In the failure case, the actual "flags" value is stored in loaded_flags.
| 56 if (flags->compare_exchange_weak(loaded_flags, new_flags)) |
| 57 break; |
| 58 } |
| 59 } |
| 60 |
| 61 } // namespace |
| 62 |
| 63 namespace base { |
| 64 |
| 65 // All allocations and data-structures must be aligned to this byte boundary. |
| 66 // Alignment as large as the physical bus between CPU and RAM is _required_ |
| 67 // for some architectures, is simply more efficient on other CPUs, and |
| 68 // generally a Good Idea(tm) for all platforms as it reduces/eliminates the |
| 69 // chance that a type will span cache lines. Alignment mustn't be less |
| 70 // than 8 to ensure proper alignment for all types. The rest is a balance |
| 71 // between reducing spans across multiple cache lines and wasted space spent |
| 72 // padding out allocations. An alignment of 16 would ensure that the block |
| 73 // header structure always sits in a single cache line. An average of about |
| 74 // 1/2 this value will be wasted with every allocation. |
| 75 const uint32_t PersistentMemoryAllocator::kAllocAlignment = 8; |
| 76 |
| 77 // The block-header is placed at the top of every allocation within the |
| 78 // segment to describe the data that follows it. |
| 79 struct PersistentMemoryAllocator::BlockHeader { |
| 80 uint32_t size; // Number of bytes in this block, including header. |
| 81 uint32_t cookie; // Constant value indicating completed allocation. |
| 82 uint32_t type_id; // A number provided by caller indicating data type. |
| 83 std::atomic<uint32_t> next; // Pointer to the next block when iterating. |
| 84 }; |
| 85 |
| 86 // The shared metadata exists once at the top of the memory segment to |
| 87 // describe the state of the allocator to all processes. |
| 88 struct PersistentMemoryAllocator::SharedMetadata { |
| 89 uint32_t cookie; // Some value that indicates complete initialization. |
| 90 uint32_t size; // Total size of memory segment. |
| 91 uint32_t page_size; // Paging size within memory segment. |
| 92 uint32_t version; // Version code so upgrades don't break. |
| 93 std::atomic<uint32_t> freeptr; // Offset/ref to first free space in segment. |
| 94 std::atomic<uint32_t> flags; // Bitfield of information flags. |
| 95 uint64_t id; // Arbitrary ID number given by creator. |
| 96 uint32_t name; // Reference to stored name string. |
| 97 |
| 98 // The "iterable" queue is an append-only M&S Queue as described here:
| 99 // https://www.research.ibm.com/people/m/michael/podc-1996.pdf |
| 100 std::atomic<uint32_t> tailptr; // Last block available for iteration. |
| 101 BlockHeader queue; // Empty block for linked-list head/tail. (must be last) |
| 102 }; |
| 103 |
| 104 // The "queue" block header is used to detect "last node" so that zero/null |
| 105 // can be used to indicate that it hasn't been added at all. It is part of |
| 106 // the SharedMetadata structure which itself is always located at offset zero. |
| 107 const PersistentMemoryAllocator::Reference |
| 108 PersistentMemoryAllocator::kReferenceQueue = |
| 109 offsetof(SharedMetadata, queue); |
| 110 const PersistentMemoryAllocator::Reference |
| 111 PersistentMemoryAllocator::kReferenceNull = 0; |
| 112 |
| 113 |
| 114 // static |
| 115 bool PersistentMemoryAllocator::IsMemoryAcceptable(const void* base, |
| 116 size_t size, |
| 117 size_t page_size, |
| 118 bool readonly) { |
| 119 return ((base && reinterpret_cast<uintptr_t>(base) % kAllocAlignment == 0) && |
| 120 (size >= sizeof(SharedMetadata) && size <= kSegmentMaxSize) && |
| 121 (size >= kSegmentMinSize || readonly) && |
| 122 (size % kAllocAlignment == 0 || readonly) && |
| 123 (page_size == 0 || size % page_size == 0 || readonly)); |
| 124 } |
| 125 |
| 126 PersistentMemoryAllocator::PersistentMemoryAllocator(void* base, |
| 127 size_t size, |
| 128 size_t page_size, |
| 129 uint64_t id, |
| 130 const std::string& name, |
| 131 bool readonly) |
| 132 : mem_base_(static_cast<char*>(base)), |
| 133 mem_size_(static_cast<uint32_t>(size)), |
| 134 mem_page_(static_cast<uint32_t>((page_size ? page_size : size))), |
| 135 readonly_(readonly), |
| 136 corrupt_(0), |
| 137 allocs_histogram_(nullptr), |
| 138 used_histogram_(nullptr) { |
| 139 static_assert(sizeof(BlockHeader) % kAllocAlignment == 0, |
| 140 "BlockHeader is not a multiple of kAllocAlignment"); |
| 141 static_assert(sizeof(SharedMetadata) % kAllocAlignment == 0, |
| 142 "SharedMetadata is not a multiple of kAllocAlignment"); |
| 143 static_assert(kReferenceQueue % kAllocAlignment == 0, |
| 144 "\"queue\" is not aligned properly; must be at end of struct"); |
| 145 |
| 146 // Ensure that the memory segment is of acceptable size.
| 147 CHECK(IsMemoryAcceptable(base, size, page_size, readonly)); |
| 148 |
| 149 // These atomics operate inter-process and so must be lock-free. The local |
| 150 // casts are to make sure it can be evaluated at compile time to a constant. |
| 151 CHECK(((SharedMetadata*)0)->freeptr.is_lock_free()); |
| 152 CHECK(((SharedMetadata*)0)->flags.is_lock_free()); |
| 153 CHECK(((BlockHeader*)0)->next.is_lock_free()); |
| 154 CHECK(corrupt_.is_lock_free()); |
| 155 |
| 156 if (shared_meta()->cookie != kGlobalCookie) { |
| 157 if (readonly) { |
| 158 NOTREACHED(); |
| 159 SetCorrupt(); |
| 160 return; |
| 161 } |
| 162 |
| 163 // This block is only executed when a completely new memory segment is |
| 164 // being initialized. It's unshared and single-threaded... |
| 165 volatile BlockHeader* const first_block = |
| 166 reinterpret_cast<volatile BlockHeader*>(mem_base_ + |
| 167 sizeof(SharedMetadata)); |
| 168 if (shared_meta()->cookie != 0 || |
| 169 shared_meta()->size != 0 || |
| 170 shared_meta()->version != 0 || |
| 171 shared_meta()->freeptr.load() != 0 || |
| 172 shared_meta()->flags.load() != 0 || |
| 173 shared_meta()->id != 0 || |
| 174 shared_meta()->name != 0 || |
| 175 shared_meta()->tailptr != 0 || |
| 176 shared_meta()->queue.cookie != 0 || |
| 177 shared_meta()->queue.next.load() != 0 || |
| 178 first_block->size != 0 || |
| 179 first_block->cookie != 0 || |
| 180 first_block->type_id != 0 || |
| 181 first_block->next != 0) { |
| 182 // ...or something malicious has been playing with the metadata. |
| 183 NOTREACHED(); |
| 184 SetCorrupt(); |
| 185 } |
| 186 |
| 187 // This is still safe to do even if corruption has been detected. |
| 188 shared_meta()->cookie = kGlobalCookie; |
| 189 shared_meta()->size = mem_size_; |
| 190 shared_meta()->page_size = mem_page_; |
| 191 shared_meta()->version = kGlobalVersion; |
| 192 shared_meta()->id = id; |
| 193 shared_meta()->freeptr.store(sizeof(SharedMetadata)); |
| 194 |
| 195 // Set up the queue of iterable allocations. |
| 196 shared_meta()->queue.size = sizeof(BlockHeader); |
| 197 shared_meta()->queue.cookie = kBlockCookieQueue; |
| 198 shared_meta()->queue.next.store(kReferenceQueue); |
| 199 shared_meta()->tailptr.store(kReferenceQueue); |
| 200 |
| 201 // Allocate space for the name so other processes can learn it. |
| 202 if (!name.empty()) { |
| 203 const size_t name_length = name.length() + 1; |
| 204 shared_meta()->name = Allocate(name_length, 0); |
| 205 char* name_cstr = GetAsObject<char>(shared_meta()->name, 0); |
| 206 if (name_cstr) |
| 207 strcpy(name_cstr, name.c_str()); |
| 208 } |
| 209 } else { |
| 210 if (readonly) { |
| 211 // For read-only access, validate reasonable ctor parameters. |
| 212 DCHECK_GE(mem_size_, shared_meta()->freeptr.load()); |
| 213 } else { |
| 214 // The allocator is attaching to a previously initialized segment of |
| 215 // memory. Make sure the embedded data matches what has been passed. |
| 216 if (shared_meta()->size != mem_size_ || |
| 217 shared_meta()->page_size != mem_page_) { |
| 218 NOTREACHED(); |
| 219 SetCorrupt(); |
| 220 } |
| 221 } |
| 222 } |
| 223 } |
| 224 |
| 225 PersistentMemoryAllocator::~PersistentMemoryAllocator() { |
| 226 // It's strictly forbidden to do any memory access here in case there is |
| 227 // some issue with the underlying memory segment. The "Local" allocator |
| 228 // makes use of this to allow deletion of the segment on the heap from |
| 229 // within its destructor. |
| 230 } |
| 231 |
| 232 uint64_t PersistentMemoryAllocator::Id() const { |
| 233 return shared_meta()->id; |
| 234 } |
| 235 |
| 236 const char* PersistentMemoryAllocator::Name() const { |
| 237 Reference name_ref = shared_meta()->name; |
| 238 const char* name_cstr = GetAsObject<char>(name_ref, 0); |
| 239 if (!name_cstr) |
| 240 return ""; |
| 241 |
| 242 size_t name_length = GetAllocSize(name_ref); |
| 243 if (name_cstr[name_length - 1] != '\0') { |
| 244 NOTREACHED(); |
| 245 SetCorrupt(); |
| 246 return ""; |
| 247 } |
| 248 |
| 249 return name_cstr; |
| 250 } |
| 251 |
| 252 void PersistentMemoryAllocator::CreateTrackingHistograms( |
| 253 const std::string& name) { |
| 254 if (name.empty() || readonly_) |
| 255 return; |
| 256 |
| 257 DCHECK(!used_histogram_); |
| 258 used_histogram_ = Histogram::FactoryGet( |
| 259 name + ".UsedKiB", 1, 256 << 10, 100, HistogramBase::kNoFlags); |
| 260 |
| 261 DCHECK(!allocs_histogram_); |
| 262 allocs_histogram_ = Histogram::FactoryGet( |
| 263 name + ".Allocs", 1, 10000, 50, HistogramBase::kNoFlags); |
| 264 } |
| 265 |
| 266 size_t PersistentMemoryAllocator::used() const { |
| 267 return std::min(shared_meta()->freeptr.load(), mem_size_); |
| 268 } |
| 269 |
| 270 size_t PersistentMemoryAllocator::GetAllocSize(Reference ref) const { |
| 271 const volatile BlockHeader* const block = GetBlock(ref, 0, 0, false, false); |
| 272 if (!block) |
| 273 return 0; |
| 274 uint32_t size = block->size; |
| 275 // Header was verified by GetBlock() but a malicious actor could change |
| 276 // the value between there and here. Check it again. |
| 277 if (size <= sizeof(BlockHeader) || ref + size >= mem_size_) { |
| 278 SetCorrupt(); |
| 279 return 0; |
| 280 } |
| 281 return size - sizeof(BlockHeader); |
| 282 } |
| 283 |
| 284 uint32_t PersistentMemoryAllocator::GetType(Reference ref) const { |
| 285 const volatile BlockHeader* const block = GetBlock(ref, 0, 0, false, false); |
| 286 if (!block) |
| 287 return 0; |
| 288 return block->type_id; |
| 289 } |
| 290 |
| 291 void PersistentMemoryAllocator::SetType(Reference ref, uint32_t type_id) { |
| 292 DCHECK(!readonly_); |
| 293 volatile BlockHeader* const block = GetBlock(ref, 0, 0, false, false); |
| 294 if (!block) |
| 295 return; |
| 296 block->type_id = type_id; |
| 297 } |
| 298 |
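| // Public entry point for allocation. Delegates to AllocateImpl() and records
| // the requested size (or zero on failure) in the "allocs" histogram when
| // tracking histograms are enabled.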
| 299 PersistentMemoryAllocator::Reference PersistentMemoryAllocator::Allocate( |
| 300 size_t req_size, |
| 301 uint32_t type_id) { |
| 302 Reference ref = AllocateImpl(req_size, type_id); |
| 303 if (ref) { |
| 304 // Success: Record this allocation in usage stats (if active). |
| 305 if (allocs_histogram_) |
| 306 allocs_histogram_->Add(static_cast<HistogramBase::Sample>(req_size)); |
| 307 } else { |
| 308 // Failure: Record an allocation of zero for tracking. |
| 309 if (allocs_histogram_) |
| 310 allocs_histogram_->Add(0); |
| 311 } |
| 312 return ref; |
| 313 } |
| 314 |
| 315 PersistentMemoryAllocator::Reference PersistentMemoryAllocator::AllocateImpl( |
| 316 size_t req_size, |
| 317 uint32_t type_id) { |
| 318 DCHECK(!readonly_); |
| 319 |
| 320 // Validate req_size to ensure it won't overflow when used as a 32-bit value.
| 321 if (req_size > kSegmentMaxSize - sizeof(BlockHeader)) { |
| 322 NOTREACHED(); |
| 323 return kReferenceNull; |
| 324 } |
| 325 |
| 326 // Round up the requested size, plus header, to the next allocation alignment. |
| 327 uint32_t size = static_cast<uint32_t>(req_size + sizeof(BlockHeader)); |
| 328 size = (size + (kAllocAlignment - 1)) & ~(kAllocAlignment - 1); |
| 329 if (size <= sizeof(BlockHeader) || size > mem_page_) { |
| 330 NOTREACHED(); |
| 331 return kReferenceNull; |
| 332 } |
| 333 |
| 334 // Get the current start of unallocated memory. Other threads may |
| 335 // update this at any time and cause us to retry these operations. |
| 336 // This value should be treated as "const" to avoid confusion through |
| 337 // the code below but recognize that any failed compare-exchange operation |
| 338 // involving it will cause it to be loaded with a more recent value. The |
| 339 // code should either exit or restart the loop in that case. |
| 340 /* const */ uint32_t freeptr = shared_meta()->freeptr.load(); |
| 341 |
| 342 // Allocation is lockless so we do all our calculation and then, if saving
| 343 // indicates a change has occurred since we started, scrap everything and |
| 344 // start over. |
| 345 for (;;) { |
| 346 if (IsCorrupt()) |
| 347 return kReferenceNull; |
| 348 |
| 349 if (freeptr + size > mem_size_) { |
| 350 SetFlag(&shared_meta()->flags, kFlagFull); |
| 351 return kReferenceNull; |
| 352 } |
| 353 |
| 354 // Get pointer to the "free" block. If something has been allocated since |
| 355 // the load of freeptr above, it is still safe as nothing will be written |
| 356 // to that location until after the compare-exchange below. |
| 357 volatile BlockHeader* const block = GetBlock(freeptr, 0, 0, false, true); |
| 358 if (!block) { |
| 359 SetCorrupt(); |
| 360 return kReferenceNull; |
| 361 } |
| 362 |
| 363 // An allocation cannot cross page boundaries. If it would, create a |
| 364 // "wasted" block and begin again at the top of the next page. This |
| 365 // area could just be left empty but we fill in the block header just |
| 366 // for completeness' sake.
| 367 const uint32_t page_free = mem_page_ - freeptr % mem_page_; |
| 368 if (size > page_free) { |
| 369 if (page_free <= sizeof(BlockHeader)) { |
| 370 SetCorrupt(); |
| 371 return kReferenceNull; |
| 372 } |
| 373 const uint32_t new_freeptr = freeptr + page_free; |
| 374 if (shared_meta()->freeptr.compare_exchange_strong(freeptr, |
| 375 new_freeptr)) { |
| 376 block->size = page_free; |
| 377 block->cookie = kBlockCookieWasted; |
| 378 } |
| 379 continue; |
| 380 } |
| 381 |
| 382 // Don't leave a slice at the end of a page too small for anything. This |
| 383 // can result in an allocation up to two alignment-sizes greater than the |
| 384 // minimum required by requested-size + header + alignment. |
| 385 if (page_free - size < sizeof(BlockHeader) + kAllocAlignment) |
| 386 size = page_free; |
| 387 |
| 388 const uint32_t new_freeptr = freeptr + size; |
| 389 if (new_freeptr > mem_size_) { |
| 390 SetCorrupt(); |
| 391 return kReferenceNull; |
| 392 } |
| 393 |
| 394 // Save our work. Try again if another thread has completed an allocation |
| 395 // while we were processing. A "weak" exchange would be permissible here
| 396 // because the code will just loop and try again but the above processing |
| 397 // is significant so make the extra effort of a "strong" exchange. |
| 398 if (!shared_meta()->freeptr.compare_exchange_strong(freeptr, new_freeptr)) |
| 399 continue; |
| 400 |
| 401 // Given that all memory was zeroed before ever being given to an instance |
| 402 // of this class and given that we only allocate in a monotonic fashion
| 403 // going forward, it must be that the newly allocated block is completely |
| 404 // full of zeros. If we find anything in the block header that is NOT a |
| 405 // zero then something must have previously run amok through memory,
| 406 // writing beyond the allocated space and into unallocated space. |
| 407 if (block->size != 0 || |
| 408 block->cookie != kBlockCookieFree || |
| 409 block->type_id != 0 || |
| 410 block->next.load() != 0) { |
| 411 SetCorrupt(); |
| 412 return kReferenceNull; |
| 413 } |
| 414 |
| 415 block->size = size; |
| 416 block->cookie = kBlockCookieAllocated; |
| 417 block->type_id = type_id; |
| 418 return freeptr; |
| 419 } |
| 420 } |
| 421 |
| 422 void PersistentMemoryAllocator::GetMemoryInfo(MemoryInfo* meminfo) const { |
| 423 uint32_t remaining = std::max(mem_size_ - shared_meta()->freeptr.load(), |
| 424 (uint32_t)sizeof(BlockHeader)); |
| 425 meminfo->total = mem_size_; |
| 426 meminfo->free = IsCorrupt() ? 0 : remaining - sizeof(BlockHeader); |
| 427 } |
| 428 |
| 429 void PersistentMemoryAllocator::MakeIterable(Reference ref) { |
| 430 DCHECK(!readonly_); |
| 431 if (IsCorrupt()) |
| 432 return; |
| 433 volatile BlockHeader* block = GetBlock(ref, 0, 0, false, false); |
| 434 if (!block) // invalid reference |
| 435 return; |
| 436 if (block->next.load(std::memory_order_acquire) != 0) // Already iterable. |
| 437 return; |
| 438 block->next.store(kReferenceQueue, std::memory_order_release); // New tail. |
| 439 |
| 440 // Try to add this block to the tail of the queue. May take multiple tries. |
| 441 // If so, tail will be automatically updated with a more recent value during |
| 442 // compare-exchange operations. |
| 443 uint32_t tail = shared_meta()->tailptr.load(std::memory_order_acquire); |
| 444 for (;;) { |
| 445 // Acquire the current tail-pointer released by previous call to this |
| 446 // method and validate it. |
| 447 block = GetBlock(tail, 0, 0, true, false); |
| 448 if (!block) { |
| 449 SetCorrupt(); |
| 450 return; |
| 451 } |
| 452 |
| 453 // Try to insert the block at the tail of the queue. The tail node always |
| 454 // has an existing value of kReferenceQueue; if that is somehow not the |
| 455 // existing value then another thread has acted in the meantime. A "strong" |
| 456 // exchange is necessary so the "else" block does not get executed when |
| 457 // that is not actually the case (which can happen with a "weak" exchange). |
| 458 uint32_t next = kReferenceQueue; // Will get replaced with existing value. |
| 459 if (block->next.compare_exchange_strong(next, ref, |
| 460 std::memory_order_acq_rel, |
| 461 std::memory_order_acquire)) { |
| 462 // Update the tail pointer to the new offset. If the "else" clause did |
| 463 // not exist, then this could be a simple Release_Store to set the new |
| 464 // value but because it does, it's possible that other threads could add |
| 465 // one or more nodes at the tail before reaching this point. We don't |
| 466 // have to check the return value because it either operates correctly |
| 467 // or the exact same operation has already been done (by the "else" |
| 468 // clause) on some other thread. |
| 469 shared_meta()->tailptr.compare_exchange_strong(tail, ref, |
| 470 std::memory_order_release, |
| 471 std::memory_order_relaxed); |
| 472 return; |
| 473 } else { |
| 474 // In the unlikely case that a thread crashed or was killed between the |
| 475 // update of "next" and the update of "tailptr", it is necessary to |
| 476 // perform the operation that would have been done. There's no explicit |
| 477 // check for crash/kill which means that this operation may also happen |
| 478 // even when the other thread is in perfect working order which is what |
| 479 // necessitates the CompareAndSwap above. |
| 480 shared_meta()->tailptr.compare_exchange_strong(tail, next, |
| 481 std::memory_order_acq_rel, |
| 482 std::memory_order_acquire); |
| 483 } |
| 484 } |
| 485 } |
| 486 |
| 487 void PersistentMemoryAllocator::CreateIterator(Iterator* state, |
| 488 Reference starting_after) const { |
| 489 if (starting_after) { |
| 490 // Ensure that the starting point is a valid, iterable block. |
| 491 const volatile BlockHeader* block = |
| 492 GetBlock(starting_after, 0, 0, false, false); |
| 493 if (!block || !block->next.load()) { |
| 494 NOTREACHED(); |
| 495 starting_after = kReferenceQueue; |
| 496 } |
| 497 } else { |
| 498 // A zero beginning is really the Queue reference. |
| 499 starting_after = kReferenceQueue; |
| 500 } |
| 501 |
| 502 state->last = starting_after; |
| 503 state->niter = 0; |
| 504 } |
| 505 |
| 506 PersistentMemoryAllocator::Reference PersistentMemoryAllocator::GetNextIterable( |
| 507 Iterator* state, |
| 508 uint32_t* type_id) const { |
| 509 const volatile BlockHeader* block = GetBlock(state->last, 0, 0, true, false); |
| 510 if (!block) // invalid iterator state |
| 511 return kReferenceNull; |
| 512 |
| 513 // The compiler and CPU can freely reorder all memory accesses on which |
| 514 // there are no dependencies. It could, for example, move the load of |
| 515 // "freeptr" above this point because there are no explicit dependencies |
| 516 // between it and "next". If it did, however, then another block could |
| 517 // be queued after that but before the following load meaning there is |
| 518 // one more queued block than the future "detect loop by having more |
| 519 // blocks that could fit before freeptr" will allow. |
| 520 // |
| 521 // By "acquiring" the "next" value here, it's synchronized to the enqueue |
| 522 // of the node which in turn is synchronized to the allocation (which sets |
| 523 // freeptr). Thus, the scenario above cannot happen. |
| 524 uint32_t next = block->next.load(std::memory_order_acquire); |
| 525 block = GetBlock(next, 0, 0, false, false); |
| 526 if (!block) // no next allocation in queue |
| 527 return kReferenceNull; |
| 528 |
| 529 // Memory corruption could cause a loop in the list. We need to detect |
| 530 // that so as to not cause an infinite loop in the caller. We do this |
| 531 // simply by making sure we don't iterate more than the absolute maximum |
| 532 // number of allocations that could have been made. Callers are likely |
| 533 // to loop multiple times before it is detected but at least it stops. |
| 534 uint32_t freeptr = std::min( |
| 535 shared_meta()->freeptr.load(std::memory_order_acquire), |
| 536 mem_size_); |
| 537 if (state->niter > freeptr / (sizeof(BlockHeader) + kAllocAlignment)) { |
| 538 SetCorrupt(); |
| 539 return kReferenceNull; |
| 540 } |
| 541 |
| 542 state->last = next; |
| 543 state->niter++; |
| 544 *type_id = block->type_id; |
| 545 |
| 546 return next; |
| 547 } |
| 548 |
| 549 // The "corrupted" state is held both locally and globally (shared). The |
| 550 // shared flag can't be trusted since a malicious actor could overwrite it. |
| 551 // Because corruption can be detected during read-only operations such as |
| 552 // iteration, this method may be called by other "const" methods. In this |
| 553 // case, it's safe to discard the constness and modify the local flag and |
| 554 // maybe even the shared flag if the underlying data isn't actually read-only. |
| 555 void PersistentMemoryAllocator::SetCorrupt() const { |
| 556 LOG(ERROR) << "Corruption detected in shared-memory segment."; |
| 557 const_cast<std::atomic<bool>*>(&corrupt_)->store(true); |
| 558 if (!readonly_) { |
| 559 SetFlag(const_cast<volatile std::atomic<uint32_t>*>(&shared_meta()->flags), |
| 560 kFlagCorrupt); |
| 561 } |
| 562 } |
| 563 |
| 564 bool PersistentMemoryAllocator::IsCorrupt() const { |
| 565 if (corrupt_.load() || CheckFlag(&shared_meta()->flags, kFlagCorrupt)) { |
| 566 SetCorrupt(); // Make sure all indicators are set. |
| 567 return true; |
| 568 } |
| 569 return false; |
| 570 } |
| 571 |
| 572 bool PersistentMemoryAllocator::IsFull() const { |
| 573 return CheckFlag(&shared_meta()->flags, kFlagFull); |
| 574 } |
| 575 |
| 576 // Dereference a block |ref| and ensure that it's valid for the desired |
| 577 // |type_id| and |size|. |queue_ok| and |free_ok| indicate that we may try to
| 578 // access block headers not available to callers but still accessed by this
| 579 // module. By having internal dereferences go through this same function, the
| 580 // allocator is hardened against corruption.
| 581 const volatile PersistentMemoryAllocator::BlockHeader* |
| 582 PersistentMemoryAllocator::GetBlock(Reference ref, uint32_t type_id, |
| 583 uint32_t size, bool queue_ok, |
| 584 bool free_ok) const { |
| 585 // Validation of parameters. |
| 586 if (ref % kAllocAlignment != 0) |
| 587 return nullptr; |
| 588 if (ref < (queue_ok ? kReferenceQueue : sizeof(SharedMetadata))) |
| 589 return nullptr; |
| 590 size += sizeof(BlockHeader); |
| 591 if (ref + size > mem_size_) |
| 592 return nullptr; |
| 593 |
| 594 // Validation of referenced block-header. |
| 595 if (!free_ok) { |
| 596 uint32_t freeptr = shared_meta()->freeptr.load(); |
| 597 if (ref + size > freeptr) |
| 598 return nullptr; |
| 599 const volatile BlockHeader* const block = |
| 600 reinterpret_cast<volatile BlockHeader*>(mem_base_ + ref); |
| 601 if (block->size < size) |
| 602 return nullptr; |
| 603 if (ref != kReferenceQueue && block->cookie != kBlockCookieAllocated) |
| 604 return nullptr; |
| 605 if (type_id != 0 && block->type_id != type_id) |
| 606 return nullptr; |
| 607 } |
| 608 |
| 609 // Return pointer to block data. |
| 610 return reinterpret_cast<const volatile BlockHeader*>(mem_base_ + ref); |
| 611 } |
| 612 |
| 613 const volatile void* PersistentMemoryAllocator::GetBlockData( |
| 614 Reference ref, |
| 615 uint32_t type_id, |
| 616 uint32_t size) const { |
| 617 DCHECK(size > 0); |
| 618 const volatile BlockHeader* block = |
| 619 GetBlock(ref, type_id, size, false, false); |
| 620 if (!block) |
| 621 return nullptr; |
| 622 return reinterpret_cast<const volatile char*>(block) + sizeof(BlockHeader); |
| 623 } |
| 624 |
| 625 void PersistentMemoryAllocator::UpdateTrackingHistograms() { |
| 626 DCHECK(!readonly_); |
| 627 if (used_histogram_) { |
| 628 MemoryInfo meminfo; |
| 629 GetMemoryInfo(&meminfo); |
| 630 HistogramBase::Sample usedkb = static_cast<HistogramBase::Sample>( |
| 631 (meminfo.total - meminfo.free) >> 10); |
| 632 used_histogram_->Add(usedkb); |
| 633 } |
| 634 } |
| 635 |
| 636 |
| 637 //----- LocalPersistentMemoryAllocator ----------------------------------------- |
| 638 |
| 639 LocalPersistentMemoryAllocator::LocalPersistentMemoryAllocator( |
| 640 size_t size, |
| 641 uint64_t id, |
| 642 const std::string& name) |
| 643 : PersistentMemoryAllocator(memset(new char[size], 0, size), |
| 644 size, 0, id, name, false) {} |
| 645 |
| 646 LocalPersistentMemoryAllocator::~LocalPersistentMemoryAllocator() { |
| 647 delete [] mem_base_; |
| 648 } |
| 649 |
| 650 |
| 651 //----- FilePersistentMemoryAllocator ------------------------------------------ |
| 652 |
| 653 FilePersistentMemoryAllocator::FilePersistentMemoryAllocator( |
| 654 MemoryMappedFile* file, |
| 655 uint64_t id, |
| 656 const std::string& name) |
| 657 : PersistentMemoryAllocator(const_cast<uint8_t*>(file->data()), |
| 658 file->length(), 0, id, name, true), |
| 659 mapped_file_(file) {} |
| 660 |
| 661 FilePersistentMemoryAllocator::~FilePersistentMemoryAllocator() { |
| 662 } |
| 663 |
| 664 // static |
| 665 bool FilePersistentMemoryAllocator::IsFileAcceptable( |
| 666 const MemoryMappedFile& file) { |
| 667 return IsMemoryAcceptable(file.data(), file.length(), 0, true); |
| 668 } |
| 669 |
| 670 } // namespace base |
OLD | NEW |
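For orientation, here is a minimal usage sketch assembled only from APIs visible in this file (LocalPersistentMemoryAllocator, Allocate, GetAsObject, MakeIterable, CreateIterator, GetNextIterable). Exact signatures live in persistent_memory_allocator.h and may differ; the segment size, id, name, and type id below are illustrative values, not taken from the source.

  #include "base/metrics/persistent_memory_allocator.h"

  void ExampleUsage() {
    // Heap-backed, zero-initialized 64 KiB segment with an arbitrary id/name.
    base::LocalPersistentMemoryAllocator allocator(64 << 10, /*id=*/0x1234,
                                                   "ExampleAllocator");

    // Allocate a block tagged with a caller-defined type id. Allocations are
    // not visible to iterators until explicitly made iterable.
    const uint32_t kMyType = 42;
    base::PersistentMemoryAllocator::Reference ref =
        allocator.Allocate(sizeof(double), kMyType);
    if (ref) {
      double* value = allocator.GetAsObject<double>(ref, kMyType);
      if (value)
        *value = 3.14;
      allocator.MakeIterable(ref);
    }

    // Walk every iterable allocation, dispatching on its type id.
    base::PersistentMemoryAllocator::Iterator iter;
    allocator.CreateIterator(&iter, 0);
    uint32_t type_id = 0;
    base::PersistentMemoryAllocator::Reference next;
    while ((next = allocator.GetNextIterable(&iter, &type_id)) != 0) {
      if (type_id == kMyType) {
        const double* stored = allocator.GetAsObject<double>(next, kMyType);
        // ... read from |stored| ...
      }
    }
  }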