| OLD | NEW |
| (Empty) |
| 1 /* | |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | |
| 3 * | |
| 4 * Redistribution and use in source and binary forms, with or without | |
| 5 * modification, are permitted provided that the following conditions are | |
| 6 * met: | |
| 7 * | |
| 8 * * Redistributions of source code must retain the above copyright | |
| 9 * notice, this list of conditions and the following disclaimer. | |
| 10 * * Redistributions in binary form must reproduce the above | |
| 11 * copyright notice, this list of conditions and the following disclaimer | |
| 12 * in the documentation and/or other materials provided with the | |
| 13 * distribution. | |
| 14 * * Neither the name of Google Inc. nor the names of its | |
| 15 * contributors may be used to endorse or promote products derived from | |
| 16 * this software without specific prior written permission. | |
| 17 * | |
| 18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |
| 19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |
| 20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | |
| 21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | |
| 22 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |
| 23 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | |
| 24 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |
| 25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |
| 26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |
| 27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |
| 28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
| 29 */ | |
| 30 | |
| 31 #include "wtf/allocator/PartitionAlloc.h" | |
| 32 | |
| 33 #include <string.h> | |
| 34 | |
| 35 #ifndef NDEBUG | |
| 36 #include <stdio.h> | |
| 37 #endif | |
| 38 | |
| 39 // Two partition pages are used as guard / metadata pages, so make sure the | |
| 40 // super page size is bigger. | |
| 41 static_assert(WTF::kPartitionPageSize * 4 <= WTF::kSuperPageSize, | |
| 42 "ok super page size"); | |
| 43 static_assert(!(WTF::kSuperPageSize % WTF::kPartitionPageSize), | |
| 44 "ok super page multiple"); | |
| 45 // Four system pages give us room to hack out a still-guard-paged piece | |
| 46 // of metadata in the middle of a guard partition page. | |
| 47 static_assert(WTF::kSystemPageSize * 4 <= WTF::kPartitionPageSize, | |
| 48 "ok partition page size"); | |
| 49 static_assert(!(WTF::kPartitionPageSize % WTF::kSystemPageSize), | |
| 50 "ok partition page multiple"); | |
| 51 static_assert(sizeof(WTF::PartitionPage) <= WTF::kPageMetadataSize, | |
| 52 "PartitionPage should not be too big"); | |
| 53 static_assert(sizeof(WTF::PartitionBucket) <= WTF::kPageMetadataSize, | |
| 54 "PartitionBucket should not be too big"); | |
| 55 static_assert(sizeof(WTF::PartitionSuperPageExtentEntry) <= | |
| 56 WTF::kPageMetadataSize, | |
| 57 "PartitionSuperPageExtentEntry should not be too big"); | |
| 58 static_assert(WTF::kPageMetadataSize * WTF::kNumPartitionPagesPerSuperPage <= | |
| 59 WTF::kSystemPageSize, | |
| 60 "page metadata fits in hole"); | |
| 61 // Check that some of our zanier calculations worked out as expected. | |
| 62 static_assert(WTF::kGenericSmallestBucket == 8, "generic smallest bucket"); | |
| 63 static_assert(WTF::kGenericMaxBucketed == 983040, "generic max bucketed"); | |
| 64 static_assert(WTF::kMaxSystemPagesPerSlotSpan < (1 << 8), | |
| 65 "System pages per slot span must be less than 256."); | |
| 66 | |
| 67 namespace WTF { | |
| 68 | |
| 69 SpinLock PartitionRootBase::gInitializedLock; | |
| 70 bool PartitionRootBase::gInitialized = false; | |
| 71 PartitionPage PartitionRootBase::gSeedPage; | |
| 72 PartitionBucket PartitionRootBase::gPagedBucket; | |
| 73 void (*PartitionRootBase::gOomHandlingFunction)() = nullptr; | |
| 74 PartitionAllocHooks::AllocationHook* PartitionAllocHooks::m_allocationHook = | |
| 75 nullptr; | |
| 76 PartitionAllocHooks::FreeHook* PartitionAllocHooks::m_freeHook = nullptr; | |
| 77 | |
| 78 static uint8_t partitionBucketNumSystemPages(size_t size) { | |
| 79 // This works out reasonably for the current bucket sizes of the generic | |
| 80 // allocator, and the current values of partition page size and constants. | |
| 81 // Specifically, we have enough room to always pack the slots perfectly into | |
| 82 // some number of system pages. The only waste is the waste associated with | |
| 83 // unfaulted pages (i.e. wasted address space). | |
| 84 // TODO: we end up using a lot of system pages for very small sizes. For | |
| 85 // example, we'll use 12 system pages for slot size 24. The slot size is | |
| 86 // so small that the waste would be tiny with just 4, or 1, system pages. | |
| 87 // Later, we can investigate whether there are anti-fragmentation benefits | |
| 88 // to using fewer system pages. | |
| 89 double bestWasteRatio = 1.0; | |
| 90 uint16_t bestPages = 0; | |
| 91 if (size > kMaxSystemPagesPerSlotSpan * kSystemPageSize) { | |
| 92 ASSERT(!(size % kSystemPageSize)); | |
| 93 bestPages = static_cast<uint16_t>(size / kSystemPageSize); | |
| 94 RELEASE_ASSERT(bestPages < (1 << 8)); | |
| 95 return static_cast<uint8_t>(bestPages); | |
| 96 } | |
| 97 ASSERT(size <= kMaxSystemPagesPerSlotSpan * kSystemPageSize); | |
| 98 for (uint16_t i = kNumSystemPagesPerPartitionPage - 1; | |
| 99 i <= kMaxSystemPagesPerSlotSpan; ++i) { | |
| 100 size_t pageSize = kSystemPageSize * i; | |
| 101 size_t numSlots = pageSize / size; | |
| 102 size_t waste = pageSize - (numSlots * size); | |
| 103 // Leaving a page unfaulted is not free; the page will occupy an empty page | |
| 104 // table entry. Make a simple attempt to account for that. | |
| 105 size_t numRemainderPages = i & (kNumSystemPagesPerPartitionPage - 1); | |
| 106 size_t numUnfaultedPages = | |
| 107 numRemainderPages | |
| 108 ? (kNumSystemPagesPerPartitionPage - numRemainderPages) | |
| 109 : 0; | |
| 110 waste += sizeof(void*) * numUnfaultedPages; | |
| 111 double wasteRatio = static_cast<double>(waste) / static_cast<double>(pageSize); | |
| 112 if (wasteRatio < bestWasteRatio) { | |
| 113 bestWasteRatio = wasteRatio; | |
| 114 bestPages = i; | |
| 115 } | |
| 116 } | |
| 117 ASSERT(bestPages > 0); | |
| 118 RELEASE_ASSERT(bestPages <= kMaxSystemPagesPerSlotSpan); | |
| 119 return static_cast<uint8_t>(bestPages); | |
| 120 } | |
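| // Worked example for the loop above (sampling a few iterations), assuming | |
| // kSystemPageSize == 4096, kNumSystemPagesPerPartitionPage == 4 and | |
| // sizeof(void*) == 8, with the slot size 24 from the TODO: | |
| //   i == 3:  12288 / 24 == 512 slots exactly, but 3 pages leave 1 page of | |
| //            the partition page unfaulted, so waste == 8 -> ratio 8/12288 | |
| //   i == 6:  24576 / 24 == 1024 slots exactly, 2 unfaulted -> 16/24576 | |
| //   i == 12: 49152 / 24 == 2048 slots exactly, 0 unfaulted -> 0 (best) | |
| // Hence the 12 system pages for slot size 24 mentioned above. | |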
| 121 | |
| 122 static void partitionAllocBaseInit(PartitionRootBase* root) { | |
| 123 ASSERT(!root->initialized); | |
| 124 { | |
| 125 SpinLock::Guard guard(PartitionRootBase::gInitializedLock); | |
| 126 if (!PartitionRootBase::gInitialized) { | |
| 127 PartitionRootBase::gInitialized = true; | |
| 128 // We mark the seed page as free to make sure it is skipped by our | |
| 129 // logic to find a new active page. | |
| 130 PartitionRootBase::gPagedBucket.activePagesHead = | |
| 131 &PartitionRootGeneric::gSeedPage; | |
| 132 } | |
| 133 } | |
| 134 | |
| 135 root->initialized = true; | |
| 136 root->totalSizeOfCommittedPages = 0; | |
| 137 root->totalSizeOfSuperPages = 0; | |
| 138 root->totalSizeOfDirectMappedPages = 0; | |
| 139 root->nextSuperPage = 0; | |
| 140 root->nextPartitionPage = 0; | |
| 141 root->nextPartitionPageEnd = 0; | |
| 142 root->firstExtent = 0; | |
| 143 root->currentExtent = 0; | |
| 144 root->directMapList = 0; | |
| 145 | |
| 146 memset(&root->globalEmptyPageRing, '\0', sizeof(root->globalEmptyPageRing)); | |
| 147 root->globalEmptyPageRingIndex = 0; | |
| 148 | |
| 149 // This is a "magic" value so we can test if a root pointer is valid. | |
| 150 root->invertedSelf = ~reinterpret_cast<uintptr_t>(root); | |
| 151 } | |
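| // A minimal sketch of how the magic value can be consumed (the helper name | |
| // is illustrative, not part of this file): | |
| //   bool partitionRootLooksValid(const PartitionRootBase* root) { | |
| //     return root && | |
| //            ~root->invertedSelf == reinterpret_cast<uintptr_t>(root); | |
| //   } | |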
| 152 | |
| 153 static void partitionBucketInitBase(PartitionBucket* bucket, | |
| 154 PartitionRootBase* root) { | |
| 155 bucket->activePagesHead = &PartitionRootGeneric::gSeedPage; | |
| 156 bucket->emptyPagesHead = 0; | |
| 157 bucket->decommittedPagesHead = 0; | |
| 158 bucket->numFullPages = 0; | |
| 159 bucket->numSystemPagesPerSlotSpan = | |
| 160 partitionBucketNumSystemPages(bucket->slotSize); | |
| 161 } | |
| 162 | |
| 163 void partitionAllocGlobalInit(void (*oomHandlingFunction)()) { | |
| 164 ASSERT(oomHandlingFunction); | |
| 165 PartitionRootBase::gOomHandlingFunction = oomHandlingFunction; | |
| 166 } | |
| 167 | |
| 168 void partitionAllocInit(PartitionRoot* root, | |
| 169 size_t numBuckets, | |
| 170 size_t maxAllocation) { | |
| 171 partitionAllocBaseInit(root); | |
| 172 | |
| 173 root->numBuckets = numBuckets; | |
| 174 root->maxAllocation = maxAllocation; | |
| 175 size_t i; | |
| 176 for (i = 0; i < root->numBuckets; ++i) { | |
| 177 PartitionBucket* bucket = &root->buckets()[i]; | |
| 178 if (!i) | |
| 179 bucket->slotSize = kAllocationGranularity; | |
| 180 else | |
| 181 bucket->slotSize = i << kBucketShift; | |
| 182 partitionBucketInitBase(bucket, root); | |
| 183 } | |
| 184 } | |
| 185 | |
| 186 void partitionAllocGenericInit(PartitionRootGeneric* root) { | |
| 187 SpinLock::Guard guard(root->lock); | |
| 188 | |
| 189 partitionAllocBaseInit(root); | |
| 190 | |
| 191 // Precalculate some shift and mask constants used in the hot path. | |
| 192 // Example: malloc(41); 41 == 101001 in binary. | |
| 193 // The order is 6: (1 << (6 - 1)) == 32 is the highest set bit. | |
| 194 // orderIndex is the next three MSB == 010 == 2. | |
| 195 // subOrderIndexMask is a mask for the remaining bits == 11 (masking to 01 for | |
| 196 // the subOrderIndex). | |
| 197 size_t order; | |
| 198 for (order = 0; order <= kBitsPerSizet; ++order) { | |
| 199 size_t orderIndexShift; | |
| 200 if (order < kGenericNumBucketsPerOrderBits + 1) | |
| 201 orderIndexShift = 0; | |
| 202 else | |
| 203 orderIndexShift = order - (kGenericNumBucketsPerOrderBits + 1); | |
| 204 root->orderIndexShifts[order] = orderIndexShift; | |
| 205 size_t subOrderIndexMask; | |
| 206 if (order == kBitsPerSizet) { | |
| 207 // This avoids invoking undefined behavior for an excessive shift. | |
| 208 subOrderIndexMask = | |
| 209 static_cast<size_t>(-1) >> (kGenericNumBucketsPerOrderBits + 1); | |
| 210 } else { | |
| 211 subOrderIndexMask = ((static_cast<size_t>(1) << order) - 1) >> | |
| 212 (kGenericNumBucketsPerOrderBits + 1); | |
| 213 } | |
| 214 root->orderSubIndexMasks[order] = subOrderIndexMask; | |
| 215 } | |
| 216 | |
| 217 // Set up the actual usable buckets first. | |
| 218 // Note that typical values (i.e. min allocation size of 8) will result in | |
| 219 // pseudo buckets (size == 9 etc., or more generally, sizes that are not a multiple | |
| 220 // of the smallest allocation granularity). | |
| 221 // We avoid them in the bucket lookup map, but we tolerate them to keep the | |
| 222 // code simpler and the structures more generic. | |
| 223 size_t i, j; | |
| 224 size_t currentSize = kGenericSmallestBucket; | |
| 225 size_t currentIncrement = | |
| 226 kGenericSmallestBucket >> kGenericNumBucketsPerOrderBits; | |
| 227 PartitionBucket* bucket = &root->buckets[0]; | |
| 228 for (i = 0; i < kGenericNumBucketedOrders; ++i) { | |
| 229 for (j = 0; j < kGenericNumBucketsPerOrder; ++j) { | |
| 230 bucket->slotSize = currentSize; | |
| 231 partitionBucketInitBase(bucket, root); | |
| 232 // Disable pseudo buckets so that touching them faults. | |
| 233 if (currentSize % kGenericSmallestBucket) | |
| 234 bucket->activePagesHead = 0; | |
| 235 currentSize += currentIncrement; | |
| 236 ++bucket; | |
| 237 } | |
| 238 currentIncrement <<= 1; | |
| 239 } | |
| 240 ASSERT(currentSize == 1 << kGenericMaxBucketedOrder); | |
| 241 ASSERT(bucket == &root->buckets[0] + kGenericNumBuckets); | |
| 242 | |
| 243 // Then set up the fast size -> bucket lookup table. | |
| 244 bucket = &root->buckets[0]; | |
| 245 PartitionBucket** bucketPtr = &root->bucketLookups[0]; | |
| 246 for (order = 0; order <= kBitsPerSizet; ++order) { | |
| 247 for (j = 0; j < kGenericNumBucketsPerOrder; ++j) { | |
| 248 if (order < kGenericMinBucketedOrder) { | |
| 249 // Use the bucket of the finest granularity for malloc(0) etc. | |
| 250 *bucketPtr++ = &root->buckets[0]; | |
| 251 } else if (order > kGenericMaxBucketedOrder) { | |
| 252 *bucketPtr++ = &PartitionRootGeneric::gPagedBucket; | |
| 253 } else { | |
| 254 PartitionBucket* validBucket = bucket; | |
| 255 // Skip over invalid buckets. | |
| 256 while (validBucket->slotSize % kGenericSmallestBucket) | |
| 257 validBucket++; | |
| 258 *bucketPtr++ = validBucket; | |
| 259 bucket++; | |
| 260 } | |
| 261 } | |
| 262 } | |
| 263 ASSERT(bucket == &root->buckets[0] + kGenericNumBuckets); | |
| 264 ASSERT(bucketPtr == | |
| 265 &root->bucketLookups[0] + | |
| 266 ((kBitsPerSizet + 1) * kGenericNumBucketsPerOrder)); | |
| 267 // And there's one last bucket lookup that will be hit for e.g. malloc(-1), | |
| 268 // which tries to overflow to a non-existent order. | |
| 269 *bucketPtr = &PartitionRootGeneric::gPagedBucket; | |
| 270 } | |
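| // Worked lookup example, continuing malloc(41) and assuming | |
| // kGenericNumBucketsPerOrderBits == 3 (8 buckets per order), as the | |
| // comment above implies: | |
| //   order == 6, so orderIndexShifts[6] == 6 - (3 + 1) == 2 | |
| //   orderIndex == (41 >> 2) & 7 == 2 (binary 010) | |
| //   orderSubIndexMasks[6] == ((1 << 6) - 1) >> 4 == 3 (binary 11) | |
| //   subOrderIndex == 41 & 3 == 1 (binary 01) | |
| // A non-zero subOrderIndex bumps the lookup index by one, so 41 rounds up | |
| // to the first bucket with slotSize >= 41. | |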
| 271 | |
| 272 static bool partitionAllocShutdownBucket(PartitionBucket* bucket) { | |
| 273 // Failure here indicates a memory leak. | |
| 274 bool foundLeak = bucket->numFullPages; | |
| 275 for (PartitionPage* page = bucket->activePagesHead; page; | |
| 276 page = page->nextPage) | |
| 277 foundLeak |= (page->numAllocatedSlots > 0); | |
| 278 return foundLeak; | |
| 279 } | |
| 280 | |
| 281 static bool partitionAllocBaseShutdown(PartitionRootBase* root) { | |
| 282 ASSERT(root->initialized); | |
| 283 root->initialized = false; | |
| 284 | |
| 285 // Now that we've examined all partition pages in all buckets, it's safe | |
| 286 // to free all our super pages. Since the super page extent entries are | |
| 287 // stored in the super pages, we need to be careful not to access them | |
| 288 // after we've released the corresponding super page. | |
| 289 PartitionSuperPageExtentEntry* entry = root->firstExtent; | |
| 290 while (entry) { | |
| 291 PartitionSuperPageExtentEntry* nextEntry = entry->next; | |
| 292 char* superPage = entry->superPageBase; | |
| 293 char* superPagesEnd = entry->superPagesEnd; | |
| 294 while (superPage < superPagesEnd) { | |
| 295 freePages(superPage, kSuperPageSize); | |
| 296 superPage += kSuperPageSize; | |
| 297 } | |
| 298 entry = nextEntry; | |
| 299 } | |
| 300 return root->directMapList; | |
| 301 } | |
| 302 | |
| 303 bool partitionAllocShutdown(PartitionRoot* root) { | |
| 304 bool foundLeak = false; | |
| 305 size_t i; | |
| 306 for (i = 0; i < root->numBuckets; ++i) { | |
| 307 PartitionBucket* bucket = &root->buckets()[i]; | |
| 308 foundLeak |= partitionAllocShutdownBucket(bucket); | |
| 309 } | |
| 310 foundLeak |= partitionAllocBaseShutdown(root); | |
| 311 return !foundLeak; | |
| 312 } | |
| 313 | |
| 314 bool partitionAllocGenericShutdown(PartitionRootGeneric* root) { | |
| 315 SpinLock::Guard guard(root->lock); | |
| 316 bool foundLeak = false; | |
| 317 size_t i; | |
| 318 for (i = 0; i < kGenericNumBuckets; ++i) { | |
| 319 PartitionBucket* bucket = &root->buckets[i]; | |
| 320 foundLeak |= partitionAllocShutdownBucket(bucket); | |
| 321 } | |
| 322 foundLeak |= partitionAllocBaseShutdown(root); | |
| 323 return !foundLeak; | |
| 324 } | |
| 325 | |
| 326 #if !CPU(64BIT) | |
| 327 static NEVER_INLINE void partitionOutOfMemoryWithLotsOfUncommittedPages() { | |
| 328 OOM_CRASH(); | |
| 329 } | |
| 330 #endif | |
| 331 | |
| 332 static NEVER_INLINE void partitionOutOfMemory(const PartitionRootBase* root) { | |
| 333 #if !CPU(64BIT) | |
| 334 // Check whether this OOM is due to a lot of super pages that are allocated | |
| 335 // but not committed, probably due to http://crbug.com/421387. | |
| 336 if (root->totalSizeOfSuperPages + root->totalSizeOfDirectMappedPages - | |
| 337 root->totalSizeOfCommittedPages > | |
| 338 kReasonableSizeOfUnusedPages) { | |
| 339 partitionOutOfMemoryWithLotsOfUncommittedPages(); | |
| 340 } | |
| 341 #endif | |
| 342 if (PartitionRootBase::gOomHandlingFunction) | |
| 343 (*PartitionRootBase::gOomHandlingFunction)(); | |
| 344 OOM_CRASH(); | |
| 345 } | |
| 346 | |
| 347 static NEVER_INLINE void partitionExcessiveAllocationSize() { | |
| 348 OOM_CRASH(); | |
| 349 } | |
| 350 | |
| 351 static NEVER_INLINE void partitionBucketFull() { | |
| 352 OOM_CRASH(); | |
| 353 } | |
| 354 | |
| 355 // partitionPageStateIs* | |
| 356 // Note that it's only valid to call these functions on pages found on one of | |
| 357 // the page lists. Specifically, you can't call these functions on full pages | |
| 358 // that were detached from the active list. | |
| 359 static bool ALWAYS_INLINE | |
| 360 partitionPageStateIsActive(const PartitionPage* page) { | |
| 361 ASSERT(page != &PartitionRootGeneric::gSeedPage); | |
| 362 ASSERT(!page->pageOffset); | |
| 363 return (page->numAllocatedSlots > 0 && | |
| 364 (page->freelistHead || page->numUnprovisionedSlots)); | |
| 365 } | |
| 366 | |
| 367 static bool ALWAYS_INLINE partitionPageStateIsFull(const PartitionPage* page) { | |
| 368 ASSERT(page != &PartitionRootGeneric::gSeedPage); | |
| 369 ASSERT(!page->pageOffset); | |
| 370 bool ret = (page->numAllocatedSlots == partitionBucketSlots(page->bucket)); | |
| 371 if (ret) { | |
| 372 ASSERT(!page->freelistHead); | |
| 373 ASSERT(!page->numUnprovisionedSlots); | |
| 374 } | |
| 375 return ret; | |
| 376 } | |
| 377 | |
| 378 static bool ALWAYS_INLINE partitionPageStateIsEmpty(const PartitionPage* page) { | |
| 379 ASSERT(page != &PartitionRootGeneric::gSeedPage); | |
| 380 ASSERT(!page->pageOffset); | |
| 381 return (!page->numAllocatedSlots && page->freelistHead); | |
| 382 } | |
| 383 | |
| 384 static bool ALWAYS_INLINE | |
| 385 partitionPageStateIsDecommitted(const PartitionPage* page) { | |
| 386 ASSERT(page != &PartitionRootGeneric::gSeedPage); | |
| 387 ASSERT(!page->pageOffset); | |
| 388 bool ret = (!page->numAllocatedSlots && !page->freelistHead); | |
| 389 if (ret) { | |
| 390 ASSERT(!page->numUnprovisionedSlots); | |
| 391 ASSERT(page->emptyCacheIndex == -1); | |
| 392 } | |
| 393 return ret; | |
| 394 } | |
| 395 | |
| 396 static void partitionIncreaseCommittedPages(PartitionRootBase* root, | |
| 397 size_t len) { | |
| 398 root->totalSizeOfCommittedPages += len; | |
| 399 ASSERT(root->totalSizeOfCommittedPages <= | |
| 400 root->totalSizeOfSuperPages + root->totalSizeOfDirectMappedPages); | |
| 401 } | |
| 402 | |
| 403 static void partitionDecreaseCommittedPages(PartitionRootBase* root, | |
| 404 size_t len) { | |
| 405 root->totalSizeOfCommittedPages -= len; | |
| 406 ASSERT(root->totalSizeOfCommittedPages <= | |
| 407 root->totalSizeOfSuperPages + root->totalSizeOfDirectMappedPages); | |
| 408 } | |
| 409 | |
| 410 static ALWAYS_INLINE void partitionDecommitSystemPages(PartitionRootBase* root, | |
| 411 void* addr, | |
| 412 size_t len) { | |
| 413 decommitSystemPages(addr, len); | |
| 414 partitionDecreaseCommittedPages(root, len); | |
| 415 } | |
| 416 | |
| 417 static ALWAYS_INLINE void partitionRecommitSystemPages(PartitionRootBase* root, | |
| 418 void* addr, | |
| 419 size_t len) { | |
| 420 recommitSystemPages(addr, len); | |
| 421 partitionIncreaseCommittedPages(root, len); | |
| 422 } | |
| 423 | |
| 424 static ALWAYS_INLINE void* partitionAllocPartitionPages( | |
| 425 PartitionRootBase* root, | |
| 426 int flags, | |
| 427 uint16_t numPartitionPages) { | |
| 428 ASSERT(!(reinterpret_cast<uintptr_t>(root->nextPartitionPage) % | |
| 429 kPartitionPageSize)); | |
| 430 ASSERT(!(reinterpret_cast<uintptr_t>(root->nextPartitionPageEnd) % | |
| 431 kPartitionPageSize)); | |
| 432 ASSERT(numPartitionPages <= kNumPartitionPagesPerSuperPage); | |
| 433 size_t totalSize = kPartitionPageSize * numPartitionPages; | |
| 434 size_t numPartitionPagesLeft = | |
| 435 (root->nextPartitionPageEnd - root->nextPartitionPage) >> | |
| 436 kPartitionPageShift; | |
| 437 if (LIKELY(numPartitionPagesLeft >= numPartitionPages)) { | |
| 438 // In this case, we can still hand out pages from the current super page | |
| 439 // allocation. | |
| 440 char* ret = root->nextPartitionPage; | |
| 441 root->nextPartitionPage += totalSize; | |
| 442 partitionIncreaseCommittedPages(root, totalSize); | |
| 443 return ret; | |
| 444 } | |
| 445 | |
| 446 // Need a new super page. We want to allocate super pages in a contiguous | |
| 447 // address region as much as possible. This is important for not causing | |
| 448 // page table bloat and not fragmenting address spaces in 32 bit | |
| 449 // architectures. | |
| 450 char* requestedAddress = root->nextSuperPage; | |
| 451 char* superPage = reinterpret_cast<char*>(allocPages( | |
| 452 requestedAddress, kSuperPageSize, kSuperPageSize, PageAccessible)); | |
| 453 if (UNLIKELY(!superPage)) | |
| 454 return 0; | |
| 455 | |
| 456 root->totalSizeOfSuperPages += kSuperPageSize; | |
| 457 partitionIncreaseCommittedPages(root, totalSize); | |
| 458 | |
| 459 root->nextSuperPage = superPage + kSuperPageSize; | |
| 460 char* ret = superPage + kPartitionPageSize; | |
| 461 root->nextPartitionPage = ret + totalSize; | |
| 462 root->nextPartitionPageEnd = root->nextSuperPage - kPartitionPageSize; | |
| 463 // Make the first partition page in the super page a guard page, but leave a | |
| 464 // hole in the middle. | |
| 465 // This is where we put page metadata and also a tiny amount of extent | |
| 466 // metadata. | |
| 467 setSystemPagesInaccessible(superPage, kSystemPageSize); | |
| 468 setSystemPagesInaccessible(superPage + (kSystemPageSize * 2), | |
| 469 kPartitionPageSize - (kSystemPageSize * 2)); | |
| 470 // Also make the last partition page a guard page. | |
| 471 setSystemPagesInaccessible(superPage + (kSuperPageSize - kPartitionPageSize), | |
| 472 kPartitionPageSize); | |
| 473 | |
| 474 // If we were after a specific address, but didn't get it, assume that | |
| 475 // the system chose a lousy address. Here most OS'es have a default | |
| 476 // algorithm that isn't randomized. For example, most Linux | |
| 477 // distributions will allocate the mapping directly before the last | |
| 478 // successful mapping, which is far from random. So we just get fresh | |
| 479 // randomness for the next mapping attempt. | |
| 480 if (requestedAddress && requestedAddress != superPage) | |
| 481 root->nextSuperPage = 0; | |
| 482 | |
| 483 // We allocated a new super page so update super page metadata. | |
| 484 // First check if this is a new extent or not. | |
| 485 PartitionSuperPageExtentEntry* latestExtent = | |
| 486 reinterpret_cast<PartitionSuperPageExtentEntry*>( | |
| 487 partitionSuperPageToMetadataArea(superPage)); | |
| 488 // By storing the root in every extent metadata object, we have a fast way | |
| 489 // to go from a pointer within the partition to the root object. | |
| 490 latestExtent->root = root; | |
| 491 // Most new extents will be part of a larger extent, and these three fields | |
| 492 // are unused, but we initialize them to 0 so that we get a clear signal | |
| 493 // in case they are accidentally used. | |
| 494 latestExtent->superPageBase = 0; | |
| 495 latestExtent->superPagesEnd = 0; | |
| 496 latestExtent->next = 0; | |
| 497 | |
| 498 PartitionSuperPageExtentEntry* currentExtent = root->currentExtent; | |
| 499 bool isNewExtent = (superPage != requestedAddress); | |
| 500 if (UNLIKELY(isNewExtent)) { | |
| 501 if (UNLIKELY(!currentExtent)) { | |
| 502 ASSERT(!root->firstExtent); | |
| 503 root->firstExtent = latestExtent; | |
| 504 } else { | |
| 505 ASSERT(currentExtent->superPageBase); | |
| 506 currentExtent->next = latestExtent; | |
| 507 } | |
| 508 root->currentExtent = latestExtent; | |
| 509 latestExtent->superPageBase = superPage; | |
| 510 latestExtent->superPagesEnd = superPage + kSuperPageSize; | |
| 511 } else { | |
| 512 // We allocated next to an existing extent so just nudge the size up a | |
| 513 // little. | |
| 514 ASSERT(currentExtent->superPagesEnd); | |
| 515 currentExtent->superPagesEnd += kSuperPageSize; | |
| 516 ASSERT(ret >= currentExtent->superPageBase && | |
| 517 ret < currentExtent->superPagesEnd); | |
| 518 } | |
| 519 return ret; | |
| 520 } | |
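| // Resulting super page layout, assuming 4KB system pages and 16KB | |
| // partition pages (kNumSystemPagesPerPartitionPage == 4): | |
| //   system page 0          : inaccessible guard | |
| //   system page 1          : page / extent metadata (the only faulted | |
| //                            page of the first partition page) | |
| //   system pages 2-3       : inaccessible guard | |
| //   partition pages 1..N-2 : handed out by this function | |
| //   last partition page    : inaccessible guard | |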
| 521 | |
| 522 static ALWAYS_INLINE uint16_t | |
| 523 partitionBucketPartitionPages(const PartitionBucket* bucket) { | |
| 524 return (bucket->numSystemPagesPerSlotSpan + | |
| 525 (kNumSystemPagesPerPartitionPage - 1)) / | |
| 526 kNumSystemPagesPerPartitionPage; | |
| 527 } | |
| 528 | |
| 529 static ALWAYS_INLINE void partitionPageReset(PartitionPage* page) { | |
| 530 ASSERT(partitionPageStateIsDecommitted(page)); | |
| 531 | |
| 532 page->numUnprovisionedSlots = partitionBucketSlots(page->bucket); | |
| 533 ASSERT(page->numUnprovisionedSlots); | |
| 534 | |
| 535 page->nextPage = nullptr; | |
| 536 } | |
| 537 | |
| 538 static ALWAYS_INLINE void partitionPageSetup(PartitionPage* page, | |
| 539 PartitionBucket* bucket) { | |
| 540 // The bucket never changes. We set it up once. | |
| 541 page->bucket = bucket; | |
| 542 page->emptyCacheIndex = -1; | |
| 543 | |
| 544 partitionPageReset(page); | |
| 545 | |
| 546 // If this page has just a single slot, do not set up page offsets for any | |
| 547 // page metadata other than the first one. This ensures that attempts to | |
| 548 // touch invalid page metadata fail. | |
| 549 if (page->numUnprovisionedSlots == 1) | |
| 550 return; | |
| 551 | |
| 552 uint16_t numPartitionPages = partitionBucketPartitionPages(bucket); | |
| 553 char* pageCharPtr = reinterpret_cast<char*>(page); | |
| 554 for (uint16_t i = 1; i < numPartitionPages; ++i) { | |
| 555 pageCharPtr += kPageMetadataSize; | |
| 556 PartitionPage* secondaryPage = | |
| 557 reinterpret_cast<PartitionPage*>(pageCharPtr); | |
| 558 secondaryPage->pageOffset = i; | |
| 559 } | |
| 560 } | |
| 561 | |
| 562 static ALWAYS_INLINE char* partitionPageAllocAndFillFreelist( | |
| 563 PartitionPage* page) { | |
| 564 ASSERT(page != &PartitionRootGeneric::gSeedPage); | |
| 565 uint16_t numSlots = page->numUnprovisionedSlots; | |
| 566 ASSERT(numSlots); | |
| 567 PartitionBucket* bucket = page->bucket; | |
| 568 // We should only get here when _every_ slot is either used or unprovisioned. | |
| 569 // (The third state is "on the freelist". If we have a non-empty freelist, we | |
| 570 // should not get here.) | |
| 571 ASSERT(numSlots + page->numAllocatedSlots == partitionBucketSlots(bucket)); | |
| 572 // Similarly, explicitly assert that the freelist is empty. | |
| 573 ASSERT(!page->freelistHead); | |
| 574 ASSERT(page->numAllocatedSlots >= 0); | |
| 575 | |
| 576 size_t size = bucket->slotSize; | |
| 577 char* base = reinterpret_cast<char*>(partitionPageToPointer(page)); | |
| 578 char* returnObject = base + (size * page->numAllocatedSlots); | |
| 579 char* firstFreelistPointer = returnObject + size; | |
| 580 char* firstFreelistPointerExtent = | |
| 581 firstFreelistPointer + sizeof(PartitionFreelistEntry*); | |
| 582 // Our goal is to fault as few system pages as possible. We calculate the | |
| 583 // page containing the "end" of the returned slot, and then allow freelist | |
| 584 // pointers to be written up to the end of that page. | |
| 585 char* subPageLimit = reinterpret_cast<char*>( | |
| 586 WTF::roundUpToSystemPage(reinterpret_cast<size_t>(firstFreelistPointer))); | |
| 587 char* slotsLimit = returnObject + (size * numSlots); | |
| 588 char* freelistLimit = subPageLimit; | |
| 589 if (UNLIKELY(slotsLimit < freelistLimit)) | |
| 590 freelistLimit = slotsLimit; | |
| 591 | |
| 592 uint16_t numNewFreelistEntries = 0; | |
| 593 if (LIKELY(firstFreelistPointerExtent <= freelistLimit)) { | |
| 594 // Only consider used space in the slot span. If we consider wasted | |
| 595 // space, we may get an off-by-one when a freelist pointer fits in the | |
| 596 // wasted space, but a slot does not. | |
| 597 // We know we can fit at least one freelist pointer. | |
| 598 numNewFreelistEntries = 1; | |
| 599 // Any further entries require space for the whole slot span. | |
| 600 numNewFreelistEntries += static_cast<uint16_t>( | |
| 601 (freelistLimit - firstFreelistPointerExtent) / size); | |
| 602 } | |
| 603 | |
| 604 // We always return an object slot -- that's the +1 below. | |
| 605 // We do not necessarily create any new freelist entries, because we cross | |
| 606 // sub page boundaries frequently for large bucket sizes. | |
| 607 ASSERT(numNewFreelistEntries + 1 <= numSlots); | |
| 608 numSlots -= (numNewFreelistEntries + 1); | |
| 609 page->numUnprovisionedSlots = numSlots; | |
| 610 page->numAllocatedSlots++; | |
| 611 | |
| 612 if (LIKELY(numNewFreelistEntries)) { | |
| 613 char* freelistPointer = firstFreelistPointer; | |
| 614 PartitionFreelistEntry* entry = | |
| 615 reinterpret_cast<PartitionFreelistEntry*>(freelistPointer); | |
| 616 page->freelistHead = entry; | |
| 617 while (--numNewFreelistEntries) { | |
| 618 freelistPointer += size; | |
| 619 PartitionFreelistEntry* nextEntry = | |
| 620 reinterpret_cast<PartitionFreelistEntry*>(freelistPointer); | |
| 621 entry->next = partitionFreelistMask(nextEntry); | |
| 622 entry = nextEntry; | |
| 623 } | |
| 624 entry->next = partitionFreelistMask(0); | |
| 625 } else { | |
| 626 page->freelistHead = 0; | |
| 627 } | |
| 628 return returnObject; | |
| 629 } | |
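| // Worked example, assuming kSystemPageSize == 4096 and a freshly reset page | |
| // with slotSize == 2048: returnObject == base, firstFreelistPointer == | |
| // base + 2048 and subPageLimit == base + 4096, so exactly one new freelist | |
| // entry (slot 1) fits below the limit. The first allocation thus faults | |
| // only one system page; later allocations provision the freelist roughly | |
| // one system page at a time. | |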
| 630 | |
| 631 // This helper function scans a bucket's active page list for a suitable new | |
| 632 // active page. | |
| 633 // When it finds a suitable new active page (one that has free slots and is not | |
| 634 // empty), it is set as the new active page. If there is no suitable new | |
| 635 // active page, the current active page is set to the seed page. | |
| 636 // As potential pages are scanned, they are tidied up according to their state. | |
| 637 // Empty pages are swept on to the empty page list, decommitted pages on to the | |
| 638 // decommitted page list and full pages are unlinked from any list. | |
| 639 static bool partitionSetNewActivePage(PartitionBucket* bucket) { | |
| 640 PartitionPage* page = bucket->activePagesHead; | |
| 641 if (page == &PartitionRootBase::gSeedPage) | |
| 642 return false; | |
| 643 | |
| 644 PartitionPage* nextPage; | |
| 645 | |
| 646 for (; page; page = nextPage) { | |
| 647 nextPage = page->nextPage; | |
| 648 ASSERT(page->bucket == bucket); | |
| 649 ASSERT(page != bucket->emptyPagesHead); | |
| 650 ASSERT(page != bucket->decommittedPagesHead); | |
| 651 | |
| 652 // Deal with empty and decommitted pages. | |
| 653 if (LIKELY(partitionPageStateIsActive(page))) { | |
| 654 // This page is usable because it has freelist entries, or has | |
| 655 // unprovisioned slots we can create freelist entries from. | |
| 656 bucket->activePagesHead = page; | |
| 657 return true; | |
| 658 } | |
| 659 if (LIKELY(partitionPageStateIsEmpty(page))) { | |
| 660 page->nextPage = bucket->emptyPagesHead; | |
| 661 bucket->emptyPagesHead = page; | |
| 662 } else if (LIKELY(partitionPageStateIsDecommitted(page))) { | |
| 663 page->nextPage = bucket->decommittedPagesHead; | |
| 664 bucket->decommittedPagesHead = page; | |
| 665 } else { | |
| 666 ASSERT(partitionPageStateIsFull(page)); | |
| 667 // If we get here, we found a full page. Skip over it too, and also | |
| 668 // tag it as full (via a negative value). We need it tagged so that | |
| 669 // the free path can tell, and can move it back into the active page list. | |
| 670 page->numAllocatedSlots = -page->numAllocatedSlots; | |
| 671 ++bucket->numFullPages; | |
| 672 // numFullPages is a uint16_t for efficient packing so guard against | |
| 673 // overflow to be safe. | |
| 674 if (UNLIKELY(!bucket->numFullPages)) | |
| 675 partitionBucketFull(); | |
| 676 // Not necessary but might help stop accidents. | |
| 677 page->nextPage = 0; | |
| 678 } | |
| 679 } | |
| 680 | |
| 681 bucket->activePagesHead = &PartitionRootGeneric::gSeedPage; | |
| 682 return false; | |
| 683 } | |
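| // Summary of the dispositions made by the scan above: | |
| //   active      -> becomes the new activePagesHead; the scan stops | |
| //   empty       -> pushed onto the bucket's emptyPagesHead list | |
| //   decommitted -> pushed onto the bucket's decommittedPagesHead list | |
| //   full        -> numAllocatedSlots negated, numFullPages incremented, | |
| //                  page unlinked from the active list | |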
| 684 | |
| 685 static ALWAYS_INLINE PartitionDirectMapExtent* partitionPageToDirectMapExtent( | |
| 686 PartitionPage* page) { | |
| 687 ASSERT(partitionBucketIsDirectMapped(page->bucket)); | |
| 688 return reinterpret_cast<PartitionDirectMapExtent*>( | |
| 689 reinterpret_cast<char*>(page) + 3 * kPageMetadataSize); | |
| 690 } | |
| 691 | |
| 692 static ALWAYS_INLINE void partitionPageSetRawSize(PartitionPage* page, | |
| 693 size_t size) { | |
| 694 size_t* rawSizePtr = partitionPageGetRawSizePtr(page); | |
| 695 if (UNLIKELY(rawSizePtr != nullptr)) | |
| 696 *rawSizePtr = size; | |
| 697 } | |
| 698 | |
| 699 static ALWAYS_INLINE PartitionPage* partitionDirectMap(PartitionRootBase* root, | |
| 700 int flags, | |
| 701 size_t rawSize) { | |
| 702 size_t size = partitionDirectMapSize(rawSize); | |
| 703 | |
| 704 // Because we need to fake looking like a super page, we need to allocate | |
| 705 // a bunch of system pages more than "size": | |
| 706 // - The first few system pages are the partition page in which the super | |
| 707 // page metadata is stored. We fault just one system page out of a partition | |
| 708 // page sized clump. | |
| 709 // - We add a trailing guard page on 32-bit (on 64-bit we rely on the | |
| 710 // massive address space plus randomization instead). | |
| 711 size_t mapSize = size + kPartitionPageSize; | |
| 712 #if !CPU(64BIT) | |
| 713 mapSize += kSystemPageSize; | |
| 714 #endif | |
| 715 // Round up to the allocation granularity. | |
| 716 mapSize += kPageAllocationGranularityOffsetMask; | |
| 717 mapSize &= kPageAllocationGranularityBaseMask; | |
| 718 | |
| 719 // TODO: these pages will be zero-filled. Consider internalizing an | |
| 720 // allocZeroed() API so we can avoid a memset() entirely in this case. | |
| 721 char* ptr = reinterpret_cast<char*>( | |
| 722 allocPages(0, mapSize, kSuperPageSize, PageAccessible)); | |
| 723 if (UNLIKELY(!ptr)) | |
| 724 return nullptr; | |
| 725 | |
| 726 size_t committedPageSize = size + kSystemPageSize; | |
| 727 root->totalSizeOfDirectMappedPages += committedPageSize; | |
| 728 partitionIncreaseCommittedPages(root, committedPageSize); | |
| 729 | |
| 730 char* slot = ptr + kPartitionPageSize; | |
| 731 setSystemPagesInaccessible(ptr + (kSystemPageSize * 2), | |
| 732 kPartitionPageSize - (kSystemPageSize * 2)); | |
| 733 #if !CPU(64BIT) | |
| 734 setSystemPagesInaccessible(ptr, kSystemPageSize); | |
| 735 setSystemPagesInaccessible(slot + size, kSystemPageSize); | |
| 736 #endif | |
| 737 | |
| 738 PartitionSuperPageExtentEntry* extent = | |
| 739 reinterpret_cast<PartitionSuperPageExtentEntry*>( | |
| 740 partitionSuperPageToMetadataArea(ptr)); | |
| 741 extent->root = root; | |
| 742 // The new structures are all located inside a fresh system page so they | |
| 743 // will all be zeroed out. These ASSERTs are for documentation. | |
| 744 ASSERT(!extent->superPageBase); | |
| 745 ASSERT(!extent->superPagesEnd); | |
| 746 ASSERT(!extent->next); | |
| 747 PartitionPage* page = partitionPointerToPageNoAlignmentCheck(slot); | |
| 748 PartitionBucket* bucket = reinterpret_cast<PartitionBucket*>( | |
| 749 reinterpret_cast<char*>(page) + (kPageMetadataSize * 2)); | |
| 750 ASSERT(!page->nextPage); | |
| 751 ASSERT(!page->numAllocatedSlots); | |
| 752 ASSERT(!page->numUnprovisionedSlots); | |
| 753 ASSERT(!page->pageOffset); | |
| 754 ASSERT(!page->emptyCacheIndex); | |
| 755 page->bucket = bucket; | |
| 756 page->freelistHead = reinterpret_cast<PartitionFreelistEntry*>(slot); | |
| 757 PartitionFreelistEntry* nextEntry = | |
| 758 reinterpret_cast<PartitionFreelistEntry*>(slot); | |
| 759 nextEntry->next = partitionFreelistMask(0); | |
| 760 | |
| 761 ASSERT(!bucket->activePagesHead); | |
| 762 ASSERT(!bucket->emptyPagesHead); | |
| 763 ASSERT(!bucket->decommittedPagesHead); | |
| 764 ASSERT(!bucket->numSystemPagesPerSlotSpan); | |
| 765 ASSERT(!bucket->numFullPages); | |
| 766 bucket->slotSize = size; | |
| 767 | |
| 768 PartitionDirectMapExtent* mapExtent = partitionPageToDirectMapExtent(page); | |
| 769 mapExtent->mapSize = mapSize - kPartitionPageSize - kSystemPageSize; | |
| 770 mapExtent->bucket = bucket; | |
| 771 | |
| 772 // Maintain the doubly-linked list of all direct mappings. | |
| 773 mapExtent->nextExtent = root->directMapList; | |
| 774 if (mapExtent->nextExtent) | |
| 775 mapExtent->nextExtent->prevExtent = mapExtent; | |
| 776 mapExtent->prevExtent = nullptr; | |
| 777 root->directMapList = mapExtent; | |
| 778 | |
| 779 return page; | |
| 780 } | |
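| // Resulting direct-map layout, faked up to look like a super page so the | |
| // usual pointer-to-metadata machinery works unchanged: | |
| //   ptr .. ptr + kPartitionPageSize  : metadata clump (one faulted system | |
| //                                      page; the rest inaccessible, plus a | |
| //                                      leading guard page on 32-bit) | |
| //   slot == ptr + kPartitionPageSize : the allocation itself | |
| //   slot + size                      : trailing guard page (32-bit only) | |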
| 781 | |
| 782 static ALWAYS_INLINE void partitionDirectUnmap(PartitionPage* page) { | |
| 783 PartitionRootBase* root = partitionPageToRoot(page); | |
| 784 const PartitionDirectMapExtent* extent = partitionPageToDirectMapExtent(page); | |
| 785 size_t unmapSize = extent->mapSize; | |
| 786 | |
| 787 // Maintain the doubly-linked list of all direct mappings. | |
| 788 if (extent->prevExtent) { | |
| 789 ASSERT(extent->prevExtent->nextExtent == extent); | |
| 790 extent->prevExtent->nextExtent = extent->nextExtent; | |
| 791 } else { | |
| 792 root->directMapList = extent->nextExtent; | |
| 793 } | |
| 794 if (extent->nextExtent) { | |
| 795 ASSERT(extent->nextExtent->prevExtent == extent); | |
| 796 extent->nextExtent->prevExtent = extent->prevExtent; | |
| 797 } | |
| 798 | |
| 799 // Add on the size of the trailing guard page and preceding partition | |
| 800 // page. | |
| 801 unmapSize += kPartitionPageSize + kSystemPageSize; | |
| 802 | |
| 803 size_t uncommittedPageSize = page->bucket->slotSize + kSystemPageSize; | |
| 804 partitionDecreaseCommittedPages(root, uncommittedPageSize); | |
| 805 ASSERT(root->totalSizeOfDirectMappedPages >= uncommittedPageSize); | |
| 806 root->totalSizeOfDirectMappedPages -= uncommittedPageSize; | |
| 807 | |
| 808 ASSERT(!(unmapSize & kPageAllocationGranularityOffsetMask)); | |
| 809 | |
| 810 char* ptr = reinterpret_cast<char*>(partitionPageToPointer(page)); | |
| 811 // Account for the mapping starting a partition page before the actual | |
| 812 // allocation address. | |
| 813 ptr -= kPartitionPageSize; | |
| 814 | |
| 815 freePages(ptr, unmapSize); | |
| 816 } | |
| 817 | |
| 818 void* partitionAllocSlowPath(PartitionRootBase* root, | |
| 819 int flags, | |
| 820 size_t size, | |
| 821 PartitionBucket* bucket) { | |
| 822 // The slow path is called when the freelist is empty. | |
| 823 ASSERT(!bucket->activePagesHead->freelistHead); | |
| 824 | |
| 825 PartitionPage* newPage = nullptr; | |
| 826 | |
| 827 // For the partitionAllocGeneric API, we have a bunch of buckets marked | |
| 828 // as special cases. We bounce them through to the slow path so that we | |
| 829 // can still have a blazing fast hot path due to lack of corner-case | |
| 830 // branches. | |
| 831 bool returnNull = flags & PartitionAllocReturnNull; | |
| 832 if (UNLIKELY(partitionBucketIsDirectMapped(bucket))) { | |
| 833 ASSERT(size > kGenericMaxBucketed); | |
| 834 ASSERT(bucket == &PartitionRootBase::gPagedBucket); | |
| 835 ASSERT(bucket->activePagesHead == &PartitionRootGeneric::gSeedPage); | |
| 836 if (size > kGenericMaxDirectMapped) { | |
| 837 if (returnNull) | |
| 838 return nullptr; | |
| 839 partitionExcessiveAllocationSize(); | |
| 840 } | |
| 841 newPage = partitionDirectMap(root, flags, size); | |
| 842 } else if (LIKELY(partitionSetNewActivePage(bucket))) { | |
| 843 // First, did we find an active page in the active pages list? | |
| 844 newPage = bucket->activePagesHead; | |
| 845 ASSERT(partitionPageStateIsActive(newPage)); | |
| 846 } else if (LIKELY(bucket->emptyPagesHead != nullptr) || | |
| 847 LIKELY(bucket->decommittedPagesHead != nullptr)) { | |
| 848 // Second, look in our lists of empty and decommitted pages. | |
| 849 // Check empty pages first, which are preferred, but beware that an | |
| 850 // empty page might have been decommitted. | |
| 851 while (LIKELY((newPage = bucket->emptyPagesHead) != nullptr)) { | |
| 852 ASSERT(newPage->bucket == bucket); | |
| 853 ASSERT(partitionPageStateIsEmpty(newPage) || | |
| 854 partitionPageStateIsDecommitted(newPage)); | |
| 855 bucket->emptyPagesHead = newPage->nextPage; | |
| 856 // Accept the empty page unless it got decommitted. | |
| 857 if (newPage->freelistHead) { | |
| 858 newPage->nextPage = nullptr; | |
| 859 break; | |
| 860 } | |
| 861 ASSERT(partitionPageStateIsDecommitted(newPage)); | |
| 862 newPage->nextPage = bucket->decommittedPagesHead; | |
| 863 bucket->decommittedPagesHead = newPage; | |
| 864 } | |
| 865 if (UNLIKELY(!newPage) && LIKELY(bucket->decommittedPagesHead != nullptr)) { | |
| 866 newPage = bucket->decommittedPagesHead; | |
| 867 ASSERT(newPage->bucket == bucket); | |
| 868 ASSERT(partitionPageStateIsDecommitted(newPage)); | |
| 869 bucket->decommittedPagesHead = newPage->nextPage; | |
| 870 void* addr = partitionPageToPointer(newPage); | |
| 871 partitionRecommitSystemPages(root, addr, | |
| 872 partitionBucketBytes(newPage->bucket)); | |
| 873 partitionPageReset(newPage); | |
| 874 } | |
| 875 ASSERT(newPage); | |
| 876 } else { | |
| 877 // Third. If we get here, we need a brand new page. | |
| 878 uint16_t numPartitionPages = partitionBucketPartitionPages(bucket); | |
| 879 void* rawPages = | |
| 880 partitionAllocPartitionPages(root, flags, numPartitionPages); | |
| 881 if (LIKELY(rawPages != nullptr)) { | |
| 882 newPage = partitionPointerToPageNoAlignmentCheck(rawPages); | |
| 883 partitionPageSetup(newPage, bucket); | |
| 884 } | |
| 885 } | |
| 886 | |
| 887 // Bail if we had a memory allocation failure. | |
| 888 if (UNLIKELY(!newPage)) { | |
| 889 ASSERT(bucket->activePagesHead == &PartitionRootGeneric::gSeedPage); | |
| 890 if (returnNull) | |
| 891 return nullptr; | |
| 892 partitionOutOfMemory(root); | |
| 893 } | |
| 894 | |
| 895 bucket = newPage->bucket; | |
| 896 ASSERT(bucket != &PartitionRootBase::gPagedBucket); | |
| 897 bucket->activePagesHead = newPage; | |
| 898 partitionPageSetRawSize(newPage, size); | |
| 899 | |
| 900 // If we found an active page with free slots, or an empty page, we have a | |
| 901 // usable freelist head. | |
| 902 if (LIKELY(newPage->freelistHead != nullptr)) { | |
| 903 PartitionFreelistEntry* entry = newPage->freelistHead; | |
| 904 PartitionFreelistEntry* newHead = partitionFreelistMask(entry->next); | |
| 905 newPage->freelistHead = newHead; | |
| 906 newPage->numAllocatedSlots++; | |
| 907 return entry; | |
| 908 } | |
| 909 // Otherwise, we need to build the freelist. | |
| 910 ASSERT(newPage->numUnprovisionedSlots); | |
| 911 return partitionPageAllocAndFillFreelist(newPage); | |
| 912 } | |
| 913 | |
| 914 static ALWAYS_INLINE void partitionDecommitPage(PartitionRootBase* root, | |
| 915 PartitionPage* page) { | |
| 916 ASSERT(partitionPageStateIsEmpty(page)); | |
| 917 ASSERT(!partitionBucketIsDirectMapped(page->bucket)); | |
| 918 void* addr = partitionPageToPointer(page); | |
| 919 partitionDecommitSystemPages(root, addr, partitionBucketBytes(page->bucket)); | |
| 920 | |
| 921 // We actually leave the decommitted page in the active list. We'll sweep | |
| 922 // it on to the decommitted page list when we next walk the active page | |
| 923 // list. | |
| 924 // Pulling this trick enables us to use a singly-linked page list for all | |
| 925 // cases, which is critical in keeping the page metadata structure down to | |
| 926 // 32 bytes in size. | |
| 927 page->freelistHead = 0; | |
| 928 page->numUnprovisionedSlots = 0; | |
| 929 ASSERT(partitionPageStateIsDecommitted(page)); | |
| 930 } | |
| 931 | |
| 932 static void partitionDecommitPageIfPossible(PartitionRootBase* root, | |
| 933 PartitionPage* page) { | |
| 934 ASSERT(page->emptyCacheIndex >= 0); | |
| 935 ASSERT(static_cast<unsigned>(page->emptyCacheIndex) < kMaxFreeableSpans); | |
| 936 ASSERT(page == root->globalEmptyPageRing[page->emptyCacheIndex]); | |
| 937 page->emptyCacheIndex = -1; | |
| 938 if (partitionPageStateIsEmpty(page)) | |
| 939 partitionDecommitPage(root, page); | |
| 940 } | |
| 941 | |
| 942 static ALWAYS_INLINE void partitionRegisterEmptyPage(PartitionPage* page) { | |
| 943 ASSERT(partitionPageStateIsEmpty(page)); | |
| 944 PartitionRootBase* root = partitionPageToRoot(page); | |
| 945 | |
| 946 // If the page is already registered as empty, give it another life. | |
| 947 if (page->emptyCacheIndex != -1) { | |
| 948 ASSERT(page->emptyCacheIndex >= 0); | |
| 949 ASSERT(static_cast<unsigned>(page->emptyCacheIndex) < kMaxFreeableSpans); | |
| 950 ASSERT(root->globalEmptyPageRing[page->emptyCacheIndex] == page); | |
| 951 root->globalEmptyPageRing[page->emptyCacheIndex] = 0; | |
| 952 } | |
| 953 | |
| 954 int16_t currentIndex = root->globalEmptyPageRingIndex; | |
| 955 PartitionPage* pageToDecommit = root->globalEmptyPageRing[currentIndex]; | |
| 956 // The page might well have been re-activated, filled up, etc. before we get | |
| 957 // around to looking at it here. | |
| 958 if (pageToDecommit) | |
| 959 partitionDecommitPageIfPossible(root, pageToDecommit); | |
| 960 | |
| 961 // We put the empty slot span on our global list of "pages that were once | |
| 962 // empty", thus providing it a bit of breathing room to get re-used before | |
| 963 // we really free it. This improves performance, particularly on Mac OS X | |
| 964 // which has subpar memory management performance. | |
| 965 root->globalEmptyPageRing[currentIndex] = page; | |
| 966 page->emptyCacheIndex = currentIndex; | |
| 967 ++currentIndex; | |
| 968 if (currentIndex == kMaxFreeableSpans) | |
| 969 currentIndex = 0; | |
| 970 root->globalEmptyPageRingIndex = currentIndex; | |
| 971 } | |
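| // Net effect: a FIFO ring of kMaxFreeableSpans entries. A page parked here | |
| // is decommitted only once kMaxFreeableSpans further registrations have | |
| // cycled the ring back around to its slot, unless it gets re-activated or | |
| // re-registered first. | |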
| 972 | |
| 973 static void partitionDecommitEmptyPages(PartitionRootBase* root) { | |
| 974 for (size_t i = 0; i < kMaxFreeableSpans; ++i) { | |
| 975 PartitionPage* page = root->globalEmptyPageRing[i]; | |
| 976 if (page) | |
| 977 partitionDecommitPageIfPossible(root, page); | |
| 978 root->globalEmptyPageRing[i] = nullptr; | |
| 979 } | |
| 980 } | |
| 981 | |
| 982 void partitionFreeSlowPath(PartitionPage* page) { | |
| 983 PartitionBucket* bucket = page->bucket; | |
| 984 ASSERT(page != &PartitionRootGeneric::gSeedPage); | |
| 985 if (LIKELY(page->numAllocatedSlots == 0)) { | |
| 986 // Page became fully unused. | |
| 987 if (UNLIKELY(partitionBucketIsDirectMapped(bucket))) { | |
| 988 partitionDirectUnmap(page); | |
| 989 return; | |
| 990 } | |
| 991 // If it's the current active page, change it. We bounce the page to | |
| 992 // the empty list as a force towards defragmentation. | |
| 993 if (LIKELY(page == bucket->activePagesHead)) | |
| 994 (void)partitionSetNewActivePage(bucket); | |
| 995 ASSERT(bucket->activePagesHead != page); | |
| 996 | |
| 997 partitionPageSetRawSize(page, 0); | |
| 998 ASSERT(!partitionPageGetRawSize(page)); | |
| 999 | |
| 1000 partitionRegisterEmptyPage(page); | |
| 1001 } else { | |
| 1002 ASSERT(!partitionBucketIsDirectMapped(bucket)); | |
| 1003 // Ensure that the page is full. That's the only valid case if we | |
| 1004 // arrive here. | |
| 1005 ASSERT(page->numAllocatedSlots < 0); | |
| 1006 // A transition of numAllocatedSlots from 0 to -1 is not legal, and | |
| 1007 // likely indicates a double-free. | |
| 1008 SECURITY_CHECK(page->numAllocatedSlots != -1); | |
| 1009 page->numAllocatedSlots = -page->numAllocatedSlots - 2; | |
| 1010 ASSERT(page->numAllocatedSlots == partitionBucketSlots(bucket) - 1); | |
| 1011 // Fully used page became partially used. It must be put back on the | |
| 1012 // non-full page list. Also make it the current page to increase the | |
| 1013 // chances of it being filled up again. The old current page will be | |
| 1014 // the next page. | |
| 1015 ASSERT(!page->nextPage); | |
| 1016 if (LIKELY(bucket->activePagesHead != &PartitionRootGeneric::gSeedPage)) | |
| 1017 page->nextPage = bucket->activePagesHead; | |
| 1018 bucket->activePagesHead = page; | |
| 1019 --bucket->numFullPages; | |
| 1020 // Special case: for a partition page with just a single slot, it may | |
| 1021 // now be empty and we want to run it through the empty logic. | |
| 1022 if (UNLIKELY(page->numAllocatedSlots == 0)) | |
| 1023 partitionFreeSlowPath(page); | |
| 1024 } | |
| 1025 } | |
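| // Worked example of the negative tagging: a full page of N slots carries | |
| // numAllocatedSlots == -N. The fast free path decrements before calling | |
| // here, so this function sees -N - 1, and -(-N - 1) - 2 == N - 1: exactly | |
| // one slot free, as the ASSERT above verifies. A decrement from 0 yields | |
| // -1, which is why -1 is the double-free signature checked above. | |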
| 1026 | |
| 1027 bool partitionReallocDirectMappedInPlace(PartitionRootGeneric* root, | |
| 1028 PartitionPage* page, | |
| 1029 size_t rawSize) { | |
| 1030 ASSERT(partitionBucketIsDirectMapped(page->bucket)); | |
| 1031 | |
| 1032 rawSize = partitionCookieSizeAdjustAdd(rawSize); | |
| 1033 | |
| 1034 // Note that the new size might be a bucketed size; this function is called | |
| 1035 // whenever we're reallocating a direct mapped allocation. | |
| 1036 size_t newSize = partitionDirectMapSize(rawSize); | |
| 1037 if (newSize < kGenericMinDirectMappedDownsize) | |
| 1038 return false; | |
| 1039 | |
| 1040 // bucket->slotSize is the current size of the allocation. | |
| 1041 size_t currentSize = page->bucket->slotSize; | |
| 1042 if (newSize == currentSize) | |
| 1043 return true; | |
| 1044 | |
| 1045 char* charPtr = static_cast<char*>(partitionPageToPointer(page)); | |
| 1046 | |
| 1047 if (newSize < currentSize) { | |
| 1048 size_t mapSize = partitionPageToDirectMapExtent(page)->mapSize; | |
| 1049 | |
| 1050 // Don't reallocate in-place if the new size is less than 80% of the full | |
| 1051 // map size, to avoid holding on to too much unused address space. | |
| 1052 if ((newSize / kSystemPageSize) * 5 < (mapSize / kSystemPageSize) * 4) | |
| 1053 return false; | |
| 1054 | |
| 1055 // Shrink by decommitting unneeded pages and making them inaccessible. | |
| 1056 size_t decommitSize = currentSize - newSize; | |
| 1057 partitionDecommitSystemPages(root, charPtr + newSize, decommitSize); | |
| 1058 setSystemPagesInaccessible(charPtr + newSize, decommitSize); | |
| 1059 } else if (newSize <= partitionPageToDirectMapExtent(page)->mapSize) { | |
| 1060 // Grow within the actually allocated memory. Just need to make the | |
| 1061 // pages accessible again. | |
| 1062 size_t recommitSize = newSize - currentSize; | |
| 1063 bool ret = setSystemPagesAccessible(charPtr + currentSize, recommitSize); | |
| 1064 RELEASE_ASSERT(ret); | |
| 1065 partitionRecommitSystemPages(root, charPtr + currentSize, recommitSize); | |
| 1066 | |
| 1067 #if ENABLE(ASSERT) | |
| 1068 memset(charPtr + currentSize, kUninitializedByte, recommitSize); | |
| 1069 #endif | |
| 1070 } else { | |
| 1071 // We can't perform the realloc in-place. | |
| 1072 // TODO: support this too when possible. | |
| 1073 return false; | |
| 1074 } | |
| 1075 | |
| 1076 #if ENABLE(ASSERT) | |
| 1077 // Write a new trailing cookie. | |
| 1078 partitionCookieWriteValue(charPtr + rawSize - kCookieSize); | |
| 1079 #endif | |
| 1080 | |
| 1081 partitionPageSetRawSize(page, rawSize); | |
| 1082 ASSERT(partitionPageGetRawSize(page) == rawSize); | |
| 1083 | |
| 1084 page->bucket->slotSize = newSize; | |
| 1085 return true; | |
| 1086 } | |
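| // Worked example of the shrink threshold above: with mapSize == 100 system | |
| // pages, (newSize / kSystemPageSize) * 5 < 100 * 4 rejects any in-place | |
| // shrink below 80 pages. Shrinking 100 -> 80 pages proceeds in place; | |
| // 100 -> 79 returns false and the caller reallocates instead. | |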
| 1087 | |
| 1088 void* partitionReallocGeneric(PartitionRootGeneric* root, | |
| 1089 void* ptr, | |
| 1090 size_t newSize, | |
| 1091 const char* typeName) { | |
| 1092 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) | |
| 1093 return realloc(ptr, newSize); | |
| 1094 #else | |
| 1095 if (UNLIKELY(!ptr)) | |
| 1096 return partitionAllocGeneric(root, newSize, typeName); | |
| 1097 if (UNLIKELY(!newSize)) { | |
| 1098 partitionFreeGeneric(root, ptr); | |
| 1099 return 0; | |
| 1100 } | |
| 1101 | |
| 1102 if (newSize > kGenericMaxDirectMapped) | |
| 1103 partitionExcessiveAllocationSize(); | |
| 1104 | |
| 1105 ASSERT(partitionPointerIsValid(partitionCookieFreePointerAdjust(ptr))); | |
| 1106 | |
| 1107 PartitionPage* page = | |
| 1108 partitionPointerToPage(partitionCookieFreePointerAdjust(ptr)); | |
| 1109 | |
| 1110 if (UNLIKELY(partitionBucketIsDirectMapped(page->bucket))) { | |
| 1111 // We may be able to perform the realloc in place by changing the | |
| 1112 // accessibility of memory pages and, if reducing the size, decommitting | |
| 1113 // them. | |
| 1114 if (partitionReallocDirectMappedInPlace(root, page, newSize)) { | |
| 1115 PartitionAllocHooks::reallocHookIfEnabled(ptr, ptr, newSize, typeName); | |
| 1116 return ptr; | |
| 1117 } | |
| 1118 } | |
| 1119 | |
| 1120 size_t actualNewSize = partitionAllocActualSize(root, newSize); | |
| 1121 size_t actualOldSize = partitionAllocGetSize(ptr); | |
| 1122 | |
| 1123 // TODO: note that tcmalloc will "ignore" a downsizing realloc() unless the | |
| 1124 // new size is a significant percentage smaller. We could do the same if we | |
| 1125 // determine it is a win. | |
| 1126 if (actualNewSize == actualOldSize) { | |
| 1127 // Trying to allocate a block of size newSize would give us a block of | |
| 1128 // the same size as the one we've already got, so no point in doing | |
| 1129 // anything here. | |
| 1130 return ptr; | |
| 1131 } | |
| 1132 | |
| 1133 // This realloc cannot be resized in-place. Sadness. | |
| 1134 void* ret = partitionAllocGeneric(root, newSize, typeName); | |
| 1135 size_t copySize = actualOldSize; | |
| 1136 if (newSize < copySize) | |
| 1137 copySize = newSize; | |
| 1138 | |
| 1139 memcpy(ret, ptr, copySize); | |
| 1140 partitionFreeGeneric(root, ptr); | |
| 1141 return ret; | |
| 1142 #endif | |
| 1143 } | |
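| // Usage sketch of the edge cases handled above, mirroring realloc(): | |
| //   partitionReallocGeneric(root, nullptr, n, t)  -> allocates, like | |
| //                                                    partitionAllocGeneric | |
| //   partitionReallocGeneric(root, p, 0, t)        -> frees p, returns 0 | |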
| 1144 | |
| 1145 static size_t partitionPurgePage(PartitionPage* page, bool discard) { | |
| 1146 const PartitionBucket* bucket = page->bucket; | |
| 1147 size_t slotSize = bucket->slotSize; | |
| 1148 if (slotSize < kSystemPageSize || !page->numAllocatedSlots) | |
| 1149 return 0; | |
| 1150 | |
| 1151 size_t bucketNumSlots = partitionBucketSlots(bucket); | |
| 1152 size_t discardableBytes = 0; | |
| 1153 | |
| 1154 size_t rawSize = partitionPageGetRawSize(page); | |
| 1155 if (rawSize) { | |
| 1156 uint32_t usedBytes = | |
| 1157 static_cast<uint32_t>(WTF::roundUpToSystemPage(rawSize)); | |
| 1158 discardableBytes = bucket->slotSize - usedBytes; | |
| 1159 if (discardableBytes && discard) { | |
| 1160 char* ptr = reinterpret_cast<char*>(partitionPageToPointer(page)); | |
| 1161 ptr += usedBytes; | |
| 1162 discardSystemPages(ptr, discardableBytes); | |
| 1163 } | |
| 1164 return discardableBytes; | |
| 1165 } | |
| 1166 | |
| 1167 const size_t maxSlotCount = | |
| 1168 (kPartitionPageSize * kMaxPartitionPagesPerSlotSpan) / kSystemPageSize; | |
| 1169 ASSERT(bucketNumSlots <= maxSlotCount); | |
| 1170 ASSERT(page->numUnprovisionedSlots < bucketNumSlots); | |
| 1171 size_t numSlots = bucketNumSlots - page->numUnprovisionedSlots; | |
| 1172 char slotUsage[maxSlotCount]; | |
| 1173 size_t lastSlot = static_cast<size_t>(-1); | |
| 1174 memset(slotUsage, 1, numSlots); | |
| 1175 char* ptr = reinterpret_cast<char*>(partitionPageToPointer(page)); | |
| 1176 PartitionFreelistEntry* entry = page->freelistHead; | |
| 1177 // First, walk the freelist for this page and make a bitmap of which slots | |
| 1178 // are not in use. | |
| 1179 while (entry) { | |
| 1180 size_t slotIndex = (reinterpret_cast<char*>(entry) - ptr) / slotSize; | |
| 1181 ASSERT(slotIndex < numSlots); | |
| 1182 slotUsage[slotIndex] = 0; | |
| 1183 entry = partitionFreelistMask(entry->next); | |
| 1184 // If we have a slot where the masked freelist entry is 0, we can | |
| 1185 // actually discard that freelist entry because touching a discarded | |
| 1186 // page is guaranteed to return original content or 0. | |
| 1187 // (Note that this optimization won't fire on big endian machines | |
| 1188 // because the masking function is negation.) | |
| 1189 if (!partitionFreelistMask(entry)) | |
| 1190 lastSlot = slotIndex; | |
| 1191 } | |
| 1192 | |
| 1193 // If the slot(s) at the end of the slot span are not in use, we can | |
| 1194 // truncate them entirely and rewrite the freelist. | |
| 1195 size_t truncatedSlots = 0; | |
| 1196 while (!slotUsage[numSlots - 1]) { | |
| 1197 truncatedSlots++; | |
| 1198 numSlots--; | |
| 1199 ASSERT(numSlots); | |
| 1200 } | |
| 1201 // First, do the work of calculating the discardable bytes. Don't actually | |
| 1202 // discard anything unless the discard flag was passed in. | |
| 1203 char* beginPtr = nullptr; | |
| 1204 char* endPtr = nullptr; | |
| 1205 size_t unprovisionedBytes = 0; | |
| 1206 if (truncatedSlots) { | |
| 1207 beginPtr = ptr + (numSlots * slotSize); | |
| 1208 endPtr = beginPtr + (slotSize * truncatedSlots); | |
| 1209 beginPtr = reinterpret_cast<char*>( | |
| 1210 WTF::roundUpToSystemPage(reinterpret_cast<size_t>(beginPtr))); | |
| 1211 // We round the end pointer here up and not down because we're at the | |
| 1212 // end of a slot span, so we "own" all the way up to the page boundary. | |
| 1213 endPtr = reinterpret_cast<char*>( | |
| 1214 WTF::roundUpToSystemPage(reinterpret_cast<size_t>(endPtr))); | |
| 1215 ASSERT(endPtr <= ptr + partitionBucketBytes(bucket)); | |
| 1216 if (beginPtr < endPtr) { | |
| 1217 unprovisionedBytes = endPtr - beginPtr; | |
| 1218 discardableBytes += unprovisionedBytes; | |
| 1219 } | |
| 1220 } | |
| 1221 if (unprovisionedBytes && discard) { | |
| 1222 ASSERT(truncatedSlots > 0); | |
| 1223 size_t numNewEntries = 0; | |
| 1224 page->numUnprovisionedSlots += static_cast<uint16_t>(truncatedSlots); | |
| 1225 // Rewrite the freelist. | |
| 1226 PartitionFreelistEntry** entryPtr = &page->freelistHead; | |
| 1227 for (size_t slotIndex = 0; slotIndex < numSlots; ++slotIndex) { | |
| 1228 if (slotUsage[slotIndex]) | |
| 1229 continue; | |
| 1230 PartitionFreelistEntry* entry = reinterpret_cast<PartitionFreelistEntry*>( | |
| 1231 ptr + (slotSize * slotIndex)); | |
| 1232 *entryPtr = partitionFreelistMask(entry); | |
| 1233 entryPtr = reinterpret_cast<PartitionFreelistEntry**>(entry); | |
| 1234 numNewEntries++; | |
| 1235 } | |
| 1236 // Terminate the freelist chain. | |
| 1237 *entryPtr = nullptr; | |
| 1238 // The freelist head is stored unmasked. | |
| 1239 page->freelistHead = partitionFreelistMask(page->freelistHead); | |
| 1240 ASSERT(numNewEntries == numSlots - page->numAllocatedSlots); | |
| 1241 // Discard the memory. | |
| 1242 discardSystemPages(beginPtr, unprovisionedBytes); | |
| 1243 } | |
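| // Note that because slotUsage was scanned from index 0 upwards, the | |
| // rewritten freelist links the remaining free slots in ascending | |
| // address order. | |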
| 1244 | |
| 1245 // Next, walk the slots and for any not in use, consider where the system | |
| 1246 // page boundaries occur. We can release any system pages back to the | |
| 1247 // system as long as we don't interfere with a freelist pointer or an | |
| 1248 // adjacent slot. | |
| 1249 for (size_t i = 0; i < numSlots; ++i) { | |
| 1250 if (slotUsage[i]) | |
| 1251 continue; | |
| 1252 // The first address we can safely discard is just after the freelist | |
| 1253 // pointer. There's one quirk: if the stored freelist pointer is null, | |
| 1254 // we can discard that pointer value too (the lastSlot case above). | |
| 1255 char* beginPtr = ptr + (i * slotSize); | |
| 1256 char* endPtr = beginPtr + slotSize; | |
| 1257 if (i != lastSlot) | |
| 1258 beginPtr += sizeof(PartitionFreelistEntry); | |
| 1259 beginPtr = reinterpret_cast<char*>( | |
| 1260 WTF::roundUpToSystemPage(reinterpret_cast<size_t>(beginPtr))); | |
| 1261 endPtr = reinterpret_cast<char*>( | |
| 1262 WTF::roundDownToSystemPage(reinterpret_cast<size_t>(endPtr))); | |
| 1263 if (beginPtr < endPtr) { | |
| 1264 size_t partialSlotBytes = endPtr - beginPtr; | |
| 1265 discardableBytes += partialSlotBytes; | |
| 1266 if (discard) | |
| 1267 discardSystemPages(beginPtr, partialSlotBytes); | |
| 1268 } | |
| 1269 } | |
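| // Example of the partial-slot math (illustrative numbers): for a free | |
| // 9216-byte slot that starts on a page boundary, beginPtr skips the | |
| // freelist pointer (8 bytes on a 64-bit build) and rounds up to | |
| // slot + 4096, while endPtr rounds down from slot + 9216 to slot + 8192, | |
| // so the one whole system page in the middle can be discarded. | |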
| 1270 return discardableBytes; | |
| 1271 } | |
| 1272 | |
| 1273 static void partitionPurgeBucket(PartitionBucket* bucket) { | |
| 1274 if (bucket->activePagesHead != &PartitionRootGeneric::gSeedPage) { | |
| 1275 for (PartitionPage* page = bucket->activePagesHead; page; | |
| 1276 page = page->nextPage) { | |
| 1277 ASSERT(page != &PartitionRootGeneric::gSeedPage); | |
| 1278 (void)partitionPurgePage(page, true); | |
| 1279 } | |
| 1280 } | |
| 1281 } | |
| 1282 | |
| 1283 void partitionPurgeMemory(PartitionRoot* root, int flags) { | |
| 1284 if (flags & PartitionPurgeDecommitEmptyPages) | |
| 1285 partitionDecommitEmptyPages(root); | |
| 1286 // We don't currently do anything for PartitionPurgeDiscardUnusedSystemPages | |
| 1287 // here because that flag is only useful for allocations >= system page | |
| 1288 // size. We only have allocations that large inside generic partitions | |
| 1289 // at the moment. | |
| 1290 } | |
| 1291 | |
| 1292 void partitionPurgeMemoryGeneric(PartitionRootGeneric* root, int flags) { | |
| 1293 SpinLock::Guard guard(root->lock); | |
| 1294 if (flags & PartitionPurgeDecommitEmptyPages) | |
| 1295 partitionDecommitEmptyPages(root); | |
| 1296 if (flags & PartitionPurgeDiscardUnusedSystemPages) { | |
| 1297 for (size_t i = 0; i < kGenericNumBuckets; ++i) { | |
| 1298 PartitionBucket* bucket = &root->buckets[i]; | |
| 1299 if (bucket->slotSize >= kSystemPageSize) | |
| 1300 partitionPurgeBucket(bucket); | |
| 1301 } | |
| 1302 } | |
| 1303 } | |
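| | |
| // Illustrative call site (hypothetical; no such caller appears in this | |
| // file): a memory pressure handler might pass both purge flags to | |
| // release as much memory as possible: | |
| // | |
| //   partitionPurgeMemoryGeneric( | |
| //       root, PartitionPurgeDecommitEmptyPages | | |
| //                 PartitionPurgeDiscardUnusedSystemPages); | |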
| 1304 | |
| 1305 static void partitionDumpPageStats(PartitionBucketMemoryStats* statsOut, | |
| 1306 const PartitionPage* page) { | |
| 1307 uint16_t bucketNumSlots = partitionBucketSlots(page->bucket); | |
| 1308 | |
| 1309 if (partitionPageStateIsDecommitted(page)) { | |
| 1310 ++statsOut->numDecommittedPages; | |
| 1311 return; | |
| 1312 } | |
| 1313 | |
| 1314 statsOut->discardableBytes += | |
| 1315 partitionPurgePage(const_cast<PartitionPage*>(page), false); | |
| 1316 | |
| 1317 size_t rawSize = partitionPageGetRawSize(const_cast<PartitionPage*>(page)); | |
| 1318 if (rawSize) | |
| 1319 statsOut->activeBytes += static_cast<uint32_t>(rawSize); | |
| 1320 else | |
| 1321 statsOut->activeBytes += | |
| 1322 (page->numAllocatedSlots * statsOut->bucketSlotSize); | |
| 1323 | |
| 1324 size_t pageBytesResident = | |
| 1325 WTF::roundUpToSystemPage((bucketNumSlots - page->numUnprovisionedSlots) * | |
| 1326 statsOut->bucketSlotSize); | |
| 1327 statsOut->residentBytes += pageBytesResident; | |
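| // (Illustrative numbers: with bucketSlotSize == 256, bucketNumSlots == | |
| // 64 and numUnprovisionedSlots == 48, the 16 provisioned slots cover | |
| // 4096 bytes, i.e. exactly one system page of residency.) | |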
| 1328 if (partitionPageStateIsEmpty(page)) { | |
| 1329 statsOut->decommittableBytes += pageBytesResident; | |
| 1330 ++statsOut->numEmptyPages; | |
| 1331 } else if (partitionPageStateIsFull(page)) { | |
| 1332 ++statsOut->numFullPages; | |
| 1333 } else { | |
| 1334 ASSERT(partitionPageStateIsActive(page)); | |
| 1335 ++statsOut->numActivePages; | |
| 1336 } | |
| 1337 } | |
| 1338 | |
| 1339 static void partitionDumpBucketStats(PartitionBucketMemoryStats* statsOut, | |
| 1340 const PartitionBucket* bucket) { | |
| 1341 ASSERT(!partitionBucketIsDirectMapped(bucket)); | |
| 1342 statsOut->isValid = false; | |
| 1343 // If the active page list is empty (== &PartitionRootGeneric::gSeedPage), | |
| 1344 // the bucket might still need to be reported if it has a list of empty, | |
| 1345 // decommitted or full pages. | |
| 1346 if (bucket->activePagesHead == &PartitionRootGeneric::gSeedPage && | |
| 1347 !bucket->emptyPagesHead && !bucket->decommittedPagesHead && | |
| 1348 !bucket->numFullPages) | |
| 1349 return; | |
| 1350 | |
| 1351 memset(statsOut, '\0', sizeof(*statsOut)); | |
| 1352 statsOut->isValid = true; | |
| 1353 statsOut->isDirectMap = false; | |
| 1354 statsOut->numFullPages = static_cast<size_t>(bucket->numFullPages); | |
| 1355 statsOut->bucketSlotSize = bucket->slotSize; | |
| 1356 uint16_t bucketNumSlots = partitionBucketSlots(bucket); | |
| 1357 size_t bucketUsefulStorage = statsOut->bucketSlotSize * bucketNumSlots; | |
| 1358 statsOut->allocatedPageSize = partitionBucketBytes(bucket); | |
| 1359 statsOut->activeBytes = bucket->numFullPages * bucketUsefulStorage; | |
| 1360 statsOut->residentBytes = bucket->numFullPages * statsOut->allocatedPageSize; | |
| 1361 | |
| 1362 for (const PartitionPage* page = bucket->emptyPagesHead; page; | |
| 1363 page = page->nextPage) { | |
| 1364 ASSERT(partitionPageStateIsEmpty(page) || | |
| 1365 partitionPageStateIsDecommitted(page)); | |
| 1366 partitionDumpPageStats(statsOut, page); | |
| 1367 } | |
| 1368 for (const PartitionPage* page = bucket->decommittedPagesHead; page; | |
| 1369 page = page->nextPage) { | |
| 1370 ASSERT(partitionPageStateIsDecommitted(page)); | |
| 1371 partitionDumpPageStats(statsOut, page); | |
| 1372 } | |
| 1373 | |
| 1374 if (bucket->activePagesHead != &PartitionRootGeneric::gSeedPage) { | |
| 1375 for (const PartitionPage* page = bucket->activePagesHead; page; | |
| 1376 page = page->nextPage) { | |
| 1377 ASSERT(page != &PartitionRootGeneric::gSeedPage); | |
| 1378 partitionDumpPageStats(statsOut, page); | |
| 1379 } | |
| 1380 } | |
| 1381 } | |
| 1382 | |
| 1383 void partitionDumpStatsGeneric(PartitionRootGeneric* partition, | |
| 1384 const char* partitionName, | |
| 1385 bool isLightDump, | |
| 1386 PartitionStatsDumper* partitionStatsDumper) { | |
| 1387 PartitionBucketMemoryStats bucketStats[kGenericNumBuckets]; | |
| 1388 static const size_t kMaxReportableDirectMaps = 4096; | |
| 1389 uint32_t directMapLengths[kMaxReportableDirectMaps]; | |
| 1390 size_t numDirectMappedAllocations = 0; | |
| 1391 | |
| 1392 { | |
| 1393 SpinLock::Guard guard(partition->lock); | |
| 1394 | |
| 1395 for (size_t i = 0; i < kGenericNumBuckets; ++i) { | |
| 1396 const PartitionBucket* bucket = &partition->buckets[i]; | |
| 1397 // Don't report the pseudo buckets that the generic allocator sets up in | |
| 1398 // order to preserve a fast size->bucket map (see | |
| 1399 // partitionAllocGenericInit for details). | |
| 1400 if (!bucket->activePagesHead) | |
| 1401 bucketStats[i].isValid = false; | |
| 1402 else | |
| 1403 partitionDumpBucketStats(&bucketStats[i], bucket); | |
| 1404 } | |
| 1405 | |
| 1406 for (PartitionDirectMapExtent* extent = partition->directMapList; extent; | |
| 1407 extent = extent->nextExtent) { | |
| 1408 ASSERT(!extent->nextExtent || extent->nextExtent->prevExtent == extent); | |
| 1409 directMapLengths[numDirectMappedAllocations] = extent->bucket->slotSize; | |
| 1410 ++numDirectMappedAllocations; | |
| 1411 if (numDirectMappedAllocations == kMaxReportableDirectMaps) | |
| 1412 break; | |
| 1413 } | |
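| // Note: direct maps beyond kMaxReportableDirectMaps are dropped here, | |
| // so they are excluded from both the per-allocation dump and the | |
| // totals computed below. | |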
| 1414 } | |
| 1415 | |
| 1416 // partitionsDumpBucketStats is called after collecting stats because it | |
| 1417 // may itself allocate using PartitionAllocGeneric, and it could not | |
| 1418 // obtain the lock while we were still holding it. | |
| 1419 PartitionMemoryStats partitionStats = {0}; | |
| 1420 partitionStats.totalMmappedBytes = partition->totalSizeOfSuperPages + | |
| 1421 partition->totalSizeOfDirectMappedPages; | |
| 1422 partitionStats.totalCommittedBytes = partition->totalSizeOfCommittedPages; | |
| 1423 for (size_t i = 0; i < kGenericNumBuckets; ++i) { | |
| 1424 if (bucketStats[i].isValid) { | |
| 1425 partitionStats.totalResidentBytes += bucketStats[i].residentBytes; | |
| 1426 partitionStats.totalActiveBytes += bucketStats[i].activeBytes; | |
| 1427 partitionStats.totalDecommittableBytes += | |
| 1428 bucketStats[i].decommittableBytes; | |
| 1429 partitionStats.totalDiscardableBytes += bucketStats[i].discardableBytes; | |
| 1430 if (!isLightDump) | |
| 1431 partitionStatsDumper->partitionsDumpBucketStats(partitionName, | |
| 1432 &bucketStats[i]); | |
| 1433 } | |
| 1434 } | |
| 1435 | |
| 1436 size_t directMappedAllocationsTotalSize = 0; | |
| 1437 for (size_t i = 0; i < numDirectMappedAllocations; ++i) { | |
| 1438 uint32_t size = directMapLengths[i]; | |
| 1439 directMappedAllocationsTotalSize += size; | |
| 1440 if (isLightDump) | |
| 1441 continue; | |
| 1442 | |
| 1443 PartitionBucketMemoryStats stats; | |
| 1444 memset(&stats, '\0', sizeof(stats)); | |
| 1445 stats.isValid = true; | |
| 1446 stats.isDirectMap = true; | |
| 1447 stats.numFullPages = 1; | |
| 1448 stats.allocatedPageSize = size; | |
| 1449 stats.bucketSlotSize = size; | |
| 1450 stats.activeBytes = size; | |
| 1451 stats.residentBytes = size; | |
| 1452 partitionStatsDumper->partitionsDumpBucketStats(partitionName, &stats); | |
| 1453 } | |
| 1454 partitionStats.totalResidentBytes += directMappedAllocationsTotalSize; | |
| 1455 partitionStats.totalActiveBytes += directMappedAllocationsTotalSize; | |
| 1456 partitionStatsDumper->partitionDumpTotals(partitionName, &partitionStats); | |
| 1457 } | |
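| | |
| // For reference, a minimal PartitionStatsDumper implementation might | |
| // look like the sketch below. This is a hypothetical example: the real | |
| // interface is declared in the header, and the signatures here are only | |
| // inferred from the call sites in this file. | |
| // | |
| //   class LoggingStatsDumper : public PartitionStatsDumper { | |
| //    public: | |
| //     void partitionDumpTotals(const char* partitionName, | |
| //                              const PartitionMemoryStats* stats) override { | |
| //       printf("%s: committed=%zu bytes\n", partitionName, | |
| //              static_cast<size_t>(stats->totalCommittedBytes)); | |
| //     } | |
| //     void partitionsDumpBucketStats( | |
| //         const char* partitionName, | |
| //         const PartitionBucketMemoryStats* stats) override { | |
| //       printf("%s: slotSize=%zu active=%zu bytes\n", partitionName, | |
| //              static_cast<size_t>(stats->bucketSlotSize), | |
| //              static_cast<size_t>(stats->activeBytes)); | |
| //     } | |
| //   }; | |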
| 1458 | |
| 1459 void partitionDumpStats(PartitionRoot* partition, | |
| 1460 const char* partitionName, | |
| 1461 bool isLightDump, | |
| 1462 PartitionStatsDumper* partitionStatsDumper) { | |
| 1463 static const size_t kMaxReportableBuckets = 4096 / sizeof(void*); | |
| 1464 PartitionBucketMemoryStats memoryStats[kMaxReportableBuckets]; | |
| 1465 const size_t partitionNumBuckets = partition->numBuckets; | |
| 1466 ASSERT(partitionNumBuckets <= kMaxReportableBuckets); | |
| 1467 | |
| 1468 for (size_t i = 0; i < partitionNumBuckets; ++i) | |
| 1469 partitionDumpBucketStats(&memoryStats[i], &partition->buckets()[i]); | |
| 1470 | |
| 1471 // partitionsDumpBucketStats is called after collecting stats because it | |
| 1472 // can use PartitionAlloc to allocate, which would affect the statistics. | |
| 1473 PartitionMemoryStats partitionStats = {0}; | |
| 1474 partitionStats.totalMmappedBytes = partition->totalSizeOfSuperPages; | |
| 1475 partitionStats.totalCommittedBytes = partition->totalSizeOfCommittedPages; | |
| 1476 ASSERT(!partition->totalSizeOfDirectMappedPages); | |
| 1477 for (size_t i = 0; i < partitionNumBuckets; ++i) { | |
| 1478 if (memoryStats[i].isValid) { | |
| 1479 partitionStats.totalResidentBytes += memoryStats[i].residentBytes; | |
| 1480 partitionStats.totalActiveBytes += memoryStats[i].activeBytes; | |
| 1481 partitionStats.totalDecommittableBytes += | |
| 1482 memoryStats[i].decommittableBytes; | |
| 1483 partitionStats.totalDiscardableBytes += memoryStats[i].discardableBytes; | |
| 1484 if (!isLightDump) | |
| 1485 partitionStatsDumper->partitionsDumpBucketStats(partitionName, | |
| 1486 &memoryStats[i]); | |
| 1487 } | |
| 1488 } | |
| 1489 partitionStatsDumper->partitionDumpTotals(partitionName, &partitionStats); | |
| 1490 } | |
| 1491 | |
| 1492 } // namespace WTF | |