| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 20 matching lines...) |
| 31 #include "wtf/PartitionAlloc.h" | 31 #include "wtf/PartitionAlloc.h" |
| 32 | 32 |
| 33 #include <string.h> | 33 #include <string.h> |
| 34 | 34 |
| 35 #ifndef NDEBUG | 35 #ifndef NDEBUG |
| 36 #include <stdio.h> | 36 #include <stdio.h> |
| 37 #endif | 37 #endif |
| 38 | 38 |
| 39 // Two partition pages are used as guard / metadata page so make sure the super | 39 // Two partition pages are used as guard / metadata page so make sure the super |
| 40 // page size is bigger. | 40 // page size is bigger. |
| 41 static_assert(WTF::kPartitionPageSize * 4 <= WTF::kSuperPageSize, "ok super page size"); | 41 static_assert(WTF::kPartitionPageSize * 4 <= WTF::kSuperPageSize, |
| 42 static_assert(!(WTF::kSuperPageSize % WTF::kPartitionPageSize), "ok super page multiple"); | 42 "ok super page size"); |
| 43 static_assert(!(WTF::kSuperPageSize % WTF::kPartitionPageSize), |
| 44 "ok super page multiple"); |
| 43 // Four system pages gives us room to hack out a still-guard-paged piece | 45 // Four system pages gives us room to hack out a still-guard-paged piece |
| 44 // of metadata in the middle of a guard partition page. | 46 // of metadata in the middle of a guard partition page. |
| 45 static_assert(WTF::kSystemPageSize * 4 <= WTF::kPartitionPageSize, "ok partition page size"); | 47 static_assert(WTF::kSystemPageSize * 4 <= WTF::kPartitionPageSize, |
| 46 static_assert(!(WTF::kPartitionPageSize % WTF::kSystemPageSize), "ok partition page multiple"); | 48 "ok partition page size"); |
| 47 static_assert(sizeof(WTF::PartitionPage) <= WTF::kPageMetadataSize, "PartitionPage should not be too big"); | 49 static_assert(!(WTF::kPartitionPageSize % WTF::kSystemPageSize), |
| 48 static_assert(sizeof(WTF::PartitionBucket) <= WTF::kPageMetadataSize, "PartitionBucket should not be too big"); | 50 "ok partition page multiple"); |
| 49 static_assert(sizeof(WTF::PartitionSuperPageExtentEntry) <= WTF::kPageMetadataSize, "PartitionSuperPageExtentEntry should not be too big"); | 51 static_assert(sizeof(WTF::PartitionPage) <= WTF::kPageMetadataSize, |
| 50 static_assert(WTF::kPageMetadataSize * WTF::kNumPartitionPagesPerSuperPage <= WTF::kSystemPageSize, "page metadata fits in hole"); | 52 "PartitionPage should not be too big"); |
| 53 static_assert(sizeof(WTF::PartitionBucket) <= WTF::kPageMetadataSize, |
| 54 "PartitionBucket should not be too big"); |
| 55 static_assert(sizeof(WTF::PartitionSuperPageExtentEntry) <= |
| 56 WTF::kPageMetadataSize, |
| 57 "PartitionSuperPageExtentEntry should not be too big"); |
| 58 static_assert(WTF::kPageMetadataSize * WTF::kNumPartitionPagesPerSuperPage <= |
| 59 WTF::kSystemPageSize, |
| 60 "page metadata fits in hole"); |
| 51 // Check that some of our zanier calculations worked out as expected. | 61 // Check that some of our zanier calculations worked out as expected. |
| 52 static_assert(WTF::kGenericSmallestBucket == 8, "generic smallest bucket"); | 62 static_assert(WTF::kGenericSmallestBucket == 8, "generic smallest bucket"); |
| 53 static_assert(WTF::kGenericMaxBucketed == 983040, "generic max bucketed"); | 63 static_assert(WTF::kGenericMaxBucketed == 983040, "generic max bucketed"); |
| 54 | 64 |
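A quick aside on what those asserts pin down. This is a minimal sketch using assumed example values (4 KiB system pages, 16 KiB partition pages, 2 MiB super pages, 32-byte metadata slots), not the authoritative constants from wtf/PartitionAlloc.h:

```cpp
#include <cstddef>

// Assumed illustrative values; the real constants live in PartitionAlloc.h.
const size_t kSystemPageSize = 1 << 12;     // 4 KiB
const size_t kPartitionPageSize = 1 << 14;  // 16 KiB = 4 system pages
const size_t kSuperPageSize = 1 << 21;      // 2 MiB
const size_t kPageMetadataSize = 32;        // per partition page (assumed)
const size_t kNumPartitionPagesPerSuperPage =
    kSuperPageSize / kPartitionPageSize;    // 128 with these values

// Two partition pages per super page are guard/metadata, so at least four
// must fit, and the division must be exact.
static_assert(kPartitionPageSize * 4 <= kSuperPageSize, "ok super page size");
static_assert(kSuperPageSize % kPartitionPageSize == 0, "ok super page multiple");
// All per-page metadata (128 * 32 = 4096 bytes here) fits in the single
// faulted system page of the guard partition page -- the "hole".
static_assert(kPageMetadataSize * kNumPartitionPagesPerSuperPage <= kSystemPageSize,
              "page metadata fits in hole");
```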
| 55 namespace WTF { | 65 namespace WTF { |
| 56 | 66 |
| 57 SpinLock PartitionRootBase::gInitializedLock; | 67 SpinLock PartitionRootBase::gInitializedLock; |
| 58 bool PartitionRootBase::gInitialized = false; | 68 bool PartitionRootBase::gInitialized = false; |
| 59 PartitionPage PartitionRootBase::gSeedPage; | 69 PartitionPage PartitionRootBase::gSeedPage; |
| 60 PartitionBucket PartitionRootBase::gPagedBucket; | 70 PartitionBucket PartitionRootBase::gPagedBucket; |
| 61 void (*PartitionRootBase::gOomHandlingFunction)() = nullptr; | 71 void (*PartitionRootBase::gOomHandlingFunction)() = nullptr; |
| 62 PartitionAllocHooks::AllocationHook* PartitionAllocHooks::m_allocationHook = nullptr; | 72 PartitionAllocHooks::AllocationHook* PartitionAllocHooks::m_allocationHook = |
| 73 nullptr; |
| 63 PartitionAllocHooks::FreeHook* PartitionAllocHooks::m_freeHook = nullptr; | 74 PartitionAllocHooks::FreeHook* PartitionAllocHooks::m_freeHook = nullptr; |
| 64 | 75 |
| 65 static uint16_t partitionBucketNumSystemPages(size_t size) | 76 static uint16_t partitionBucketNumSystemPages(size_t size) { |
| 66 { | 77 // This works out reasonably for the current bucket sizes of the generic |
| 67 // This works out reasonably for the current bucket sizes of the generic | 78 // allocator, and the current values of partition page size and constants. |
| 68 // allocator, and the current values of partition page size and constants. | 79 // Specifically, we have enough room to always pack the slots perfectly into |
| 69 // Specifically, we have enough room to always pack the slots perfectly into | 80 // some number of system pages. The only waste is the waste associated with |
| 70 // some number of system pages. The only waste is the waste associated with | 81 // unfaulted pages (i.e. wasted address space). |
| 71 // unfaulted pages (i.e. wasted address space). | 82 // TODO: we end up using a lot of system pages for very small sizes. For |
| 72 // TODO: we end up using a lot of system pages for very small sizes. For | 83 // example, we'll use 12 system pages for slot size 24. The slot size is |
| 73 // example, we'll use 12 system pages for slot size 24. The slot size is | 84 // so small that the waste would be tiny with just 4, or 1, system pages. |
| 74 // so small that the waste would be tiny with just 4, or 1, system pages. | 85 // Later, we can investigate whether there are anti-fragmentation benefits |
| 75 // Later, we can investigate whether there are anti-fragmentation benefits | 86 // to using fewer system pages. |
| 76 // to using fewer system pages. | 87 double bestWasteRatio = 1.0f; |
| 77 double bestWasteRatio = 1.0f; | 88 uint16_t bestPages = 0; |
| 78 uint16_t bestPages = 0; | 89 if (size > kMaxSystemPagesPerSlotSpan * kSystemPageSize) { |
| 79 if (size > kMaxSystemPagesPerSlotSpan * kSystemPageSize) { | 90 ASSERT(!(size % kSystemPageSize)); |
| 80 ASSERT(!(size % kSystemPageSize)); | 91 return static_cast<uint16_t>(size / kSystemPageSize); |
| 81 return static_cast<uint16_t>(size / kSystemPageSize); | 92 } |
| 82 } | 93 ASSERT(size <= kMaxSystemPagesPerSlotSpan * kSystemPageSize); |
| 83 ASSERT(size <= kMaxSystemPagesPerSlotSpan * kSystemPageSize); | 94 for (uint16_t i = kNumSystemPagesPerPartitionPage - 1; |
| 84 for (uint16_t i = kNumSystemPagesPerPartitionPage - 1; i <= kMaxSystemPagesPerSlotSpan; ++i) { | 95 i <= kMaxSystemPagesPerSlotSpan; ++i) { |
| 85 size_t pageSize = kSystemPageSize * i; | 96 size_t pageSize = kSystemPageSize * i; |
| 86 size_t numSlots = pageSize / size; | 97 size_t numSlots = pageSize / size; |
| 87 size_t waste = pageSize - (numSlots * size); | 98 size_t waste = pageSize - (numSlots * size); |
| 88 // Leaving a page unfaulted is not free; the page will occupy an empty page table entry. | 99 // Leaving a page unfaulted is not free; the page will occupy an empty page table entry. |
| 89 // Make a simple attempt to account for that. | 100 // Make a simple attempt to account for that. |
| 90 size_t numRemainderPages = i & (kNumSystemPagesPerPartitionPage - 1); | 101 size_t numRemainderPages = i & (kNumSystemPagesPerPartitionPage - 1); |
| 91 size_t numUnfaultedPages = numRemainderPages ? (kNumSystemPagesPerPartitionPage - numRemainderPages) : 0; | 102 size_t numUnfaultedPages = |
| 92 waste += sizeof(void*) * numUnfaultedPages; | 103 numRemainderPages |
| 93 double wasteRatio = (double) waste / (double) pageSize; | 104 ? (kNumSystemPagesPerPartitionPage - numRemainderPages) |
| 94 if (wasteRatio < bestWasteRatio) { | 105 : 0; |
| 95 bestWasteRatio = wasteRatio; | 106 waste += sizeof(void*) * numUnfaultedPages; |
| 96 bestPages = i; | 107 double wasteRatio = (double)waste / (double)pageSize; |
| 97 } | 108 if (wasteRatio < bestWasteRatio) { |
| 98 } | 109 bestWasteRatio = wasteRatio; |
| 99 ASSERT(bestPages > 0); | 110 bestPages = i; |
| 100 return bestPages; | 111 } |
| 101 } | 112 } |
| 102 | 113 ASSERT(bestPages > 0); |
| 103 static void partitionAllocBaseInit(PartitionRootBase* root) | 114 return bestPages; |
| 104 { | 115 } |
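For readers skimming the review, here is a self-contained rendition of the waste-ratio search above; the three constants are assumed example values, not the shipped ones:

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Assumed illustrative constants (see PartitionAlloc.h for the real ones).
const size_t kSystemPageSize = 4096;
const uint16_t kNumSystemPagesPerPartitionPage = 4;
const uint16_t kMaxSystemPagesPerSlotSpan = 16;

// Mirrors partitionBucketNumSystemPages(): choose the slot span length (in
// system pages) that minimizes waste = leftover tail bytes plus one pointer
// of page-table cost per unfaulted trailing page.
uint16_t numSystemPagesForSlotSize(size_t size) {
  double bestWasteRatio = 1.0;
  uint16_t bestPages = 0;
  for (uint16_t i = kNumSystemPagesPerPartitionPage - 1;
       i <= kMaxSystemPagesPerSlotSpan; ++i) {
    size_t pageSize = kSystemPageSize * i;
    size_t numSlots = pageSize / size;
    size_t waste = pageSize - (numSlots * size);
    size_t numRemainderPages = i & (kNumSystemPagesPerPartitionPage - 1);
    size_t numUnfaultedPages =
        numRemainderPages ? (kNumSystemPagesPerPartitionPage - numRemainderPages)
                          : 0;
    waste += sizeof(void*) * numUnfaultedPages;
    double wasteRatio = (double)waste / (double)pageSize;
    if (wasteRatio < bestWasteRatio) {
      bestWasteRatio = wasteRatio;
      bestPages = i;
    }
  }
  return bestPages;
}

int main() {
  // Prints 12: slot size 24 packs perfectly into 12 pages, illustrating the
  // TODO above about very small sizes grabbing many system pages.
  printf("%u\n", numSystemPagesForSlotSize(24));
}
```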
| 105 ASSERT(!root->initialized); | 116 |
| 106 { | 117 static void partitionAllocBaseInit(PartitionRootBase* root) { |
| 107 SpinLock::Guard guard(PartitionRootBase::gInitializedLock); | 118 ASSERT(!root->initialized); |
| 108 if (!PartitionRootBase::gInitialized) { | 119 { |
| 109 PartitionRootBase::gInitialized = true; | 120 SpinLock::Guard guard(PartitionRootBase::gInitializedLock); |
| 110 // We mark the seed page as free to make sure it is skipped by our | 121 if (!PartitionRootBase::gInitialized) { |
| 111 // logic to find a new active page. | 122 PartitionRootBase::gInitialized = true; |
| 112 PartitionRootBase::gPagedBucket.activePagesHead = &PartitionRootGeneric::gSeedPage; | 123 // We mark the seed page as free to make sure it is skipped by our |
| 113 } | 124 // logic to find a new active page. |
| 114 } | 125 PartitionRootBase::gPagedBucket.activePagesHead = |
| 115 | 126 &PartitionRootGeneric::gSeedPage; |
| 116 root->initialized = true; | 127 } |
| 117 root->totalSizeOfCommittedPages = 0; | 128 } |
| 118 root->totalSizeOfSuperPages = 0; | 129 |
| 119 root->totalSizeOfDirectMappedPages = 0; | 130 root->initialized = true; |
| 120 root->nextSuperPage = 0; | 131 root->totalSizeOfCommittedPages = 0; |
| 121 root->nextPartitionPage = 0; | 132 root->totalSizeOfSuperPages = 0; |
| 122 root->nextPartitionPageEnd = 0; | 133 root->totalSizeOfDirectMappedPages = 0; |
| 123 root->firstExtent = 0; | 134 root->nextSuperPage = 0; |
| 124 root->currentExtent = 0; | 135 root->nextPartitionPage = 0; |
| 125 root->directMapList = 0; | 136 root->nextPartitionPageEnd = 0; |
| 126 | 137 root->firstExtent = 0; |
| 127 memset(&root->globalEmptyPageRing, '\0', sizeof(root->globalEmptyPageRing)); | 138 root->currentExtent = 0; |
| 128 root->globalEmptyPageRingIndex = 0; | 139 root->directMapList = 0; |
| 129 | 140 |
| 130 // This is a "magic" value so we can test if a root pointer is valid. | 141 memset(&root->globalEmptyPageRing, '\0', sizeof(root->globalEmptyPageRing)); |
| 131 root->invertedSelf = ~reinterpret_cast<uintptr_t>(root); | 142 root->globalEmptyPageRingIndex = 0; |
| 132 } | 143 |
| 133 | 144 // This is a "magic" value so we can test if a root pointer is valid. |
| 134 static void partitionBucketInitBase(PartitionBucket* bucket, PartitionRootBase* root) | 145 root->invertedSelf = ~reinterpret_cast<uintptr_t>(root); |
| 135 { | 146 } |
| 136 bucket->activePagesHead = &PartitionRootGeneric::gSeedPage; | 147 |
| 137 bucket->emptyPagesHead = 0; | 148 static void partitionBucketInitBase(PartitionBucket* bucket, |
| 138 bucket->decommittedPagesHead = 0; | 149 PartitionRootBase* root) { |
| 139 bucket->numFullPages = 0; | 150 bucket->activePagesHead = &PartitionRootGeneric::gSeedPage; |
| 140 bucket->numSystemPagesPerSlotSpan = partitionBucketNumSystemPages(bucket->slotSize); | 151 bucket->emptyPagesHead = 0; |
| 141 } | 152 bucket->decommittedPagesHead = 0; |
| 142 | 153 bucket->numFullPages = 0; |
| 143 void partitionAllocGlobalInit(void (*oomHandlingFunction)()) | 154 bucket->numSystemPagesPerSlotSpan = |
| 144 { | 155 partitionBucketNumSystemPages(bucket->slotSize); |
| 145 ASSERT(oomHandlingFunction); | 156 } |
| 146 PartitionRootBase::gOomHandlingFunction = oomHandlingFunction; | 157 |
| 147 } | 158 void partitionAllocGlobalInit(void (*oomHandlingFunction)()) { |
| 148 | 159 ASSERT(oomHandlingFunction); |
| 149 void partitionAllocInit(PartitionRoot* root, size_t numBuckets, size_t maxAllocation) | 160 PartitionRootBase::gOomHandlingFunction = oomHandlingFunction; |
| 150 { | 161 } |
| 151 partitionAllocBaseInit(root); | 162 |
| 152 | 163 void partitionAllocInit(PartitionRoot* root, |
| 153 root->numBuckets = numBuckets; | 164 size_t numBuckets, |
| 154 root->maxAllocation = maxAllocation; | 165 size_t maxAllocation) { |
| 155 size_t i; | 166 partitionAllocBaseInit(root); |
| 156 for (i = 0; i < root->numBuckets; ++i) { | 167 |
| 157 PartitionBucket* bucket = &root->buckets()[i]; | 168 root->numBuckets = numBuckets; |
| 158 if (!i) | 169 root->maxAllocation = maxAllocation; |
| 159 bucket->slotSize = kAllocationGranularity; | 170 size_t i; |
| 160 else | 171 for (i = 0; i < root->numBuckets; ++i) { |
| 161 bucket->slotSize = i << kBucketShift; | 172 PartitionBucket* bucket = &root->buckets()[i]; |
| 162 partitionBucketInitBase(bucket, root); | 173 if (!i) |
| 163 } | 174 bucket->slotSize = kAllocationGranularity; |
| 164 } | 175 else |
| 165 | 176 bucket->slotSize = i << kBucketShift; |
| 166 void partitionAllocGenericInit(PartitionRootGeneric* root) | 177 partitionBucketInitBase(bucket, root); |
| 167 { | 178 } |
| 168 SpinLock::Guard guard(root->lock); | 179 } |
| 169 | 180 |
| 170 partitionAllocBaseInit(root); | 181 void partitionAllocGenericInit(PartitionRootGeneric* root) { |
| 171 | 182 SpinLock::Guard guard(root->lock); |
| 172 // Precalculate some shift and mask constants used in the hot path. | 183 |
| 173 // Example: malloc(41) == 101001 binary. | 184 partitionAllocBaseInit(root); |
| 174 // Order is 6 (1 << 6-1)==32 is highest bit set. | 185 |
| 175 // orderIndex is the next three MSB == 010 == 2. | 186 // Precalculate some shift and mask constants used in the hot path. |
| 176 // subOrderIndexMask is a mask for the remaining bits == 11 (masking to 01 for the subOrderIndex). | 187 // Example: malloc(41) == 101001 binary. |
| 177 size_t order; | 188 // Order is 6 (1 << 6-1)==32 is highest bit set. |
| 178 for (order = 0; order <= kBitsPerSizet; ++order) { | 189 // orderIndex is the next three MSB == 010 == 2. |
| 179 size_t orderIndexShift; | 190 // subOrderIndexMask is a mask for the remaining bits == 11 (masking to 01 for the subOrderIndex). |
| 180 if (order < kGenericNumBucketsPerOrderBits + 1) | 191 size_t order; |
| 181 orderIndexShift = 0; | 192 for (order = 0; order <= kBitsPerSizet; ++order) { |
| 182 else | 193 size_t orderIndexShift; |
| 183 orderIndexShift = order - (kGenericNumBucketsPerOrderBits + 1); | 194 if (order < kGenericNumBucketsPerOrderBits + 1) |
| 184 root->orderIndexShifts[order] = orderIndexShift; | 195 orderIndexShift = 0; |
| 185 size_t subOrderIndexMask; | 196 else |
| 186 if (order == kBitsPerSizet) { | 197 orderIndexShift = order - (kGenericNumBucketsPerOrderBits + 1); |
| 187 // This avoids invoking undefined behavior for an excessive shift. | 198 root->orderIndexShifts[order] = orderIndexShift; |
| 188 subOrderIndexMask = static_cast<size_t>(-1) >> (kGenericNumBucketsPerOrderBits + 1); | 199 size_t subOrderIndexMask; |
| 189 } else { | 200 if (order == kBitsPerSizet) { |
| 190 subOrderIndexMask = ((static_cast<size_t>(1) << order) - 1) >> (kGenericNumBucketsPerOrderBits + 1); | 201 // This avoids invoking undefined behavior for an excessive shift. |
| 191 } | 202 subOrderIndexMask = |
| 192 root->orderSubIndexMasks[order] = subOrderIndexMask; | 203 static_cast<size_t>(-1) >> (kGenericNumBucketsPerOrderBits + 1); |
| 193 } | 204 } else { |
| 194 | 205 subOrderIndexMask = ((static_cast<size_t>(1) << order) - 1) >> |
| 195 // Set up the actual usable buckets first. | 206 (kGenericNumBucketsPerOrderBits + 1); |
| 196 // Note that typical values (i.e. min allocation size of 8) will result in | 207 } |
| 197 // pseudo buckets (size==9 etc. or more generally, size is not a multiple | 208 root->orderSubIndexMasks[order] = subOrderIndexMask; |
| 198 // of the smallest allocation granularity). | 209 } |
| 199 // We avoid them in the bucket lookup map, but we tolerate them to keep the | 210 |
| 200 // code simpler and the structures more generic. | 211 // Set up the actual usable buckets first. |
| 201 size_t i, j; | 212 // Note that typical values (i.e. min allocation size of 8) will result in |
| 202 size_t currentSize = kGenericSmallestBucket; | 213 // pseudo buckets (size==9 etc. or more generally, size is not a multiple |
| 203 size_t currentIncrement = kGenericSmallestBucket >> kGenericNumBucketsPerOrderBits; | 214 // of the smallest allocation granularity). |
| 204 PartitionBucket* bucket = &root->buckets[0]; | 215 // We avoid them in the bucket lookup map, but we tolerate them to keep the |
| 205 for (i = 0; i < kGenericNumBucketedOrders; ++i) { | 216 // code simpler and the structures more generic. |
| 206 for (j = 0; j < kGenericNumBucketsPerOrder; ++j) { | 217 size_t i, j; |
| 207 bucket->slotSize = currentSize; | 218 size_t currentSize = kGenericSmallestBucket; |
| 208 partitionBucketInitBase(bucket, root); | 219 size_t currentIncrement = |
| 209 // Disable psuedo buckets so that touching them faults. | 220 kGenericSmallestBucket >> kGenericNumBucketsPerOrderBits; |
| 210 if (currentSize % kGenericSmallestBucket) | 221 PartitionBucket* bucket = &root->buckets[0]; |
| 211 bucket->activePagesHead = 0; | 222 for (i = 0; i < kGenericNumBucketedOrders; ++i) { |
| 212 currentSize += currentIncrement; | 223 for (j = 0; j < kGenericNumBucketsPerOrder; ++j) { |
| 213 ++bucket; | 224 bucket->slotSize = currentSize; |
| 214 } | 225 partitionBucketInitBase(bucket, root); |
| 215 currentIncrement <<= 1; | 226 // Disable psuedo buckets so that touching them faults. |
| 216 } | 227 if (currentSize % kGenericSmallestBucket) |
| 217 ASSERT(currentSize == 1 << kGenericMaxBucketedOrder); | 228 bucket->activePagesHead = 0; |
| 218 ASSERT(bucket == &root->buckets[0] + kGenericNumBuckets); | 229 currentSize += currentIncrement; |
| 219 | 230 ++bucket; |
| 220 // Then set up the fast size -> bucket lookup table. | 231 } |
| 221 bucket = &root->buckets[0]; | 232 currentIncrement <<= 1; |
| 222 PartitionBucket** bucketPtr = &root->bucketLookups[0]; | 233 } |
| 223 for (order = 0; order <= kBitsPerSizet; ++order) { | 234 ASSERT(currentSize == 1 << kGenericMaxBucketedOrder); |
| 224 for (j = 0; j < kGenericNumBucketsPerOrder; ++j) { | 235 ASSERT(bucket == &root->buckets[0] + kGenericNumBuckets); |
| 225 if (order < kGenericMinBucketedOrder) { | 236 |
| 226 // Use the bucket of the finest granularity for malloc(0) etc. | 237 // Then set up the fast size -> bucket lookup table. |
| 227 *bucketPtr++ = &root->buckets[0]; | 238 bucket = &root->buckets[0]; |
| 228 } else if (order > kGenericMaxBucketedOrder) { | 239 PartitionBucket** bucketPtr = &root->bucketLookups[0]; |
| 229 *bucketPtr++ = &PartitionRootGeneric::gPagedBucket; | 240 for (order = 0; order <= kBitsPerSizet; ++order) { |
| 230 } else { | 241 for (j = 0; j < kGenericNumBucketsPerOrder; ++j) { |
| 231 PartitionBucket* validBucket = bucket; | 242 if (order < kGenericMinBucketedOrder) { |
| 232 // Skip over invalid buckets. | 243 // Use the bucket of the finest granularity for malloc(0) etc. |
| 233 while (validBucket->slotSize % kGenericSmallestBucket) | 244 *bucketPtr++ = &root->buckets[0]; |
| 234 validBucket++; | 245 } else if (order > kGenericMaxBucketedOrder) { |
| 235 *bucketPtr++ = validBucket; | 246 *bucketPtr++ = &PartitionRootGeneric::gPagedBucket; |
| 236 bucket++; | 247 } else { |
| 237 } | 248 PartitionBucket* validBucket = bucket; |
| 238 } | 249 // Skip over invalid buckets. |
| 239 } | 250 while (validBucket->slotSize % kGenericSmallestBucket) |
| 240 ASSERT(bucket == &root->buckets[0] + kGenericNumBuckets); | 251 validBucket++; |
| 241 ASSERT(bucketPtr == &root->bucketLookups[0] + ((kBitsPerSizet + 1) * kGenericNumBucketsPerOrder)); | 252 *bucketPtr++ = validBucket; |
| 242 // And there's one last bucket lookup that will be hit for e.g. malloc(-1), | 253 bucket++; |
| 243 // which tries to overflow to a non-existant order. | 254 } |
| 244 *bucketPtr = &PartitionRootGeneric::gPagedBucket; | 255 } |
| 245 } | 256 } |
| 246 | 257 ASSERT(bucket == &root->buckets[0] + kGenericNumBuckets); |
| 247 static bool partitionAllocShutdownBucket(PartitionBucket* bucket) | 258 ASSERT(bucketPtr == |
| 248 { | 259 &root->bucketLookups[0] + |
| 249 // Failure here indicates a memory leak. | 260 ((kBitsPerSizet + 1) * kGenericNumBucketsPerOrder)); |
| 250 bool foundLeak = bucket->numFullPages; | 261 // And there's one last bucket lookup that will be hit for e.g. malloc(-1), |
| 251 for (PartitionPage* page = bucket->activePagesHead; page; page = page->nextPage) | 262 // which tries to overflow to a non-existant order. |
| 252 foundLeak |= (page->numAllocatedSlots > 0); | 263 *bucketPtr = &PartitionRootGeneric::gPagedBucket; |
| 253 return foundLeak; | 264 } |
| 254 } | 265 |
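To make the malloc(41) walkthrough in partitionAllocGenericInit concrete, here is the arithmetic spelled out as a runnable sketch; kGenericNumBucketsPerOrderBits = 3 is assumed from the comment's "next three MSB" wording:

```cpp
#include <cstddef>
#include <cstdio>

int main() {
  const size_t kGenericNumBucketsPerOrderBits = 3;  // assumed: 8 buckets/order
  size_t size = 41;  // 0b101001

  // order = 1-based index of the highest set bit: 32 <= 41 < 64, so 6.
  size_t order = 0;
  for (size_t s = size; s; s >>= 1)
    ++order;

  // orderIndex = the three bits just below the top bit: 0b010 == 2.
  size_t orderIndexShift = order - (kGenericNumBucketsPerOrderBits + 1);
  size_t orderIndex =
      (size >> orderIndexShift) &
      ((static_cast<size_t>(1) << kGenericNumBucketsPerOrderBits) - 1);

  // subOrderIndexMask = remaining low bits: ((1 << 6) - 1) >> 4 == 0b11.
  size_t subOrderIndexMask =
      ((static_cast<size_t>(1) << order) - 1) >>
      (kGenericNumBucketsPerOrderBits + 1);
  size_t subOrderIndex = size & subOrderIndexMask;  // 41 & 0b11 == 0b01

  // Prints: order=6 orderIndex=2 subOrderIndex=1
  printf("order=%zu orderIndex=%zu subOrderIndex=%zu\n", order, orderIndex,
         subOrderIndex);
}
```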
| 255 | 266 static bool partitionAllocShutdownBucket(PartitionBucket* bucket) { |
| 256 static bool partitionAllocBaseShutdown(PartitionRootBase* root) | 267 // Failure here indicates a memory leak. |
| 257 { | 268 bool foundLeak = bucket->numFullPages; |
| 258 ASSERT(root->initialized); | 269 for (PartitionPage* page = bucket->activePagesHead; page; |
| 259 root->initialized = false; | 270 page = page->nextPage) |
| 260 | 271 foundLeak |= (page->numAllocatedSlots > 0); |
| 261 // Now that we've examined all partition pages in all buckets, it's safe | 272 return foundLeak; |
| 262 // to free all our super pages. Since the super page extent entries are | 273 } |
| 263 // stored in the super pages, we need to be careful not to access them | 274 |
| 264 // after we've released the corresponding super page. | 275 static bool partitionAllocBaseShutdown(PartitionRootBase* root) { |
| 265 PartitionSuperPageExtentEntry* entry = root->firstExtent; | 276 ASSERT(root->initialized); |
| 266 while (entry) { | 277 root->initialized = false; |
| 267 PartitionSuperPageExtentEntry* nextEntry = entry->next; | 278 |
| 268 char* superPage = entry->superPageBase; | 279 // Now that we've examined all partition pages in all buckets, it's safe |
| 269 char* superPagesEnd = entry->superPagesEnd; | 280 // to free all our super pages. Since the super page extent entries are |
| 270 while (superPage < superPagesEnd) { | 281 // stored in the super pages, we need to be careful not to access them |
| 271 freePages(superPage, kSuperPageSize); | 282 // after we've released the corresponding super page. |
| 272 superPage += kSuperPageSize; | 283 PartitionSuperPageExtentEntry* entry = root->firstExtent; |
| 273 } | 284 while (entry) { |
| 274 entry = nextEntry; | 285 PartitionSuperPageExtentEntry* nextEntry = entry->next; |
| 275 } | 286 char* superPage = entry->superPageBase; |
| 276 return root->directMapList; | 287 char* superPagesEnd = entry->superPagesEnd; |
| 277 } | 288 while (superPage < superPagesEnd) { |
| 278 | 289 freePages(superPage, kSuperPageSize); |
| 279 bool partitionAllocShutdown(PartitionRoot* root) | 290 superPage += kSuperPageSize; |
| 280 { | 291 } |
| 281 bool foundLeak = false; | 292 entry = nextEntry; |
| 282 size_t i; | 293 } |
| 283 for (i = 0; i < root->numBuckets; ++i) { | 294 return root->directMapList; |
| 284 PartitionBucket* bucket = &root->buckets()[i]; | 295 } |
| 285 foundLeak |= partitionAllocShutdownBucket(bucket); | 296 |
| 286 } | 297 bool partitionAllocShutdown(PartitionRoot* root) { |
| 287 foundLeak |= partitionAllocBaseShutdown(root); | 298 bool foundLeak = false; |
| 288 return !foundLeak; | 299 size_t i; |
| 289 } | 300 for (i = 0; i < root->numBuckets; ++i) { |
| 290 | 301 PartitionBucket* bucket = &root->buckets()[i]; |
| 291 bool partitionAllocGenericShutdown(PartitionRootGeneric* root) | 302 foundLeak |= partitionAllocShutdownBucket(bucket); |
| 292 { | 303 } |
| 293 SpinLock::Guard guard(root->lock); | 304 foundLeak |= partitionAllocBaseShutdown(root); |
| 294 bool foundLeak = false; | 305 return !foundLeak; |
| 295 size_t i; | 306 } |
| 296 for (i = 0; i < kGenericNumBuckets; ++i) { | 307 |
| 297 PartitionBucket* bucket = &root->buckets[i]; | 308 bool partitionAllocGenericShutdown(PartitionRootGeneric* root) { |
| 298 foundLeak |= partitionAllocShutdownBucket(bucket); | 309 SpinLock::Guard guard(root->lock); |
| 299 } | 310 bool foundLeak = false; |
| 300 foundLeak |= partitionAllocBaseShutdown(root); | 311 size_t i; |
| 301 return !foundLeak; | 312 for (i = 0; i < kGenericNumBuckets; ++i) { |
| 313 PartitionBucket* bucket = &root->buckets[i]; |
| 314 foundLeak |= partitionAllocShutdownBucket(bucket); |
| 315 } |
| 316 foundLeak |= partitionAllocBaseShutdown(root); |
| 317 return !foundLeak; |
| 302 } | 318 } |
| 303 | 319 |
| 304 #if !CPU(64BIT) | 320 #if !CPU(64BIT) |
| 305 static NEVER_INLINE void partitionOutOfMemoryWithLotsOfUncommitedPages() | 321 static NEVER_INLINE void partitionOutOfMemoryWithLotsOfUncommitedPages() { |
| 306 { | 322 IMMEDIATE_CRASH(); |
| 307 IMMEDIATE_CRASH(); | |
| 308 } | 323 } |
| 309 #endif | 324 #endif |
| 310 | 325 |
| 311 static NEVER_INLINE void partitionOutOfMemory(const PartitionRootBase* root) | 326 static NEVER_INLINE void partitionOutOfMemory(const PartitionRootBase* root) { |
| 312 { | |
| 313 #if !CPU(64BIT) | 327 #if !CPU(64BIT) |
| 314 // Check whether this OOM is due to a lot of super pages that are allocated | 328 // Check whether this OOM is due to a lot of super pages that are allocated |
| 315 // but not committed, probably due to http://crbug.com/421387. | 329 // but not committed, probably due to http://crbug.com/421387. |
| 316 if (root->totalSizeOfSuperPages + root->totalSizeOfDirectMappedPages - root->totalSizeOfCommittedPages > kReasonableSizeOfUnusedPages) { | 330 if (root->totalSizeOfSuperPages + root->totalSizeOfDirectMappedPages - |
| 317 partitionOutOfMemoryWithLotsOfUncommitedPages(); | 331 root->totalSizeOfCommittedPages > |
| 318 } | 332 kReasonableSizeOfUnusedPages) { |
| 333 partitionOutOfMemoryWithLotsOfUncommitedPages(); |
| 334 } |
| 319 #endif | 335 #endif |
| 320 if (PartitionRootBase::gOomHandlingFunction) | 336 if (PartitionRootBase::gOomHandlingFunction) |
| 321 (*PartitionRootBase::gOomHandlingFunction)(); | 337 (*PartitionRootBase::gOomHandlingFunction)(); |
| 322 IMMEDIATE_CRASH(); | 338 IMMEDIATE_CRASH(); |
| 323 } | 339 } |
| 324 | 340 |
| 325 static NEVER_INLINE void partitionExcessiveAllocationSize() | 341 static NEVER_INLINE void partitionExcessiveAllocationSize() { |
| 326 { | 342 IMMEDIATE_CRASH(); |
| 327 IMMEDIATE_CRASH(); | 343 } |
| 328 } | 344 |
| 329 | 345 static NEVER_INLINE void partitionBucketFull() { |
| 330 static NEVER_INLINE void partitionBucketFull() | 346 IMMEDIATE_CRASH(); |
| 331 { | |
| 332 IMMEDIATE_CRASH(); | |
| 333 } | 347 } |
| 334 | 348 |
| 335 // partitionPageStateIs* | 349 // partitionPageStateIs* |
| 336 // Note that it's only valid to call these functions on pages found on one of | 350 // Note that it's only valid to call these functions on pages found on one of |
| 337 // the page lists. Specifically, you can't call these functions on full pages | 351 // the page lists. Specifically, you can't call these functions on full pages |
| 338 // that were detached from the active list. | 352 // that were detached from the active list. |
| 339 static bool ALWAYS_INLINE partitionPageStateIsActive(const PartitionPage* page) | 353 static bool ALWAYS_INLINE |
| 340 { | 354 partitionPageStateIsActive(const PartitionPage* page) { |
| 341 ASSERT(page != &PartitionRootGeneric::gSeedPage); | 355 ASSERT(page != &PartitionRootGeneric::gSeedPage); |
| 342 ASSERT(!page->pageOffset); | 356 ASSERT(!page->pageOffset); |
| 343 return (page->numAllocatedSlots > 0 && (page->freelistHead || page->numUnprovisionedSlots)); | 357 return (page->numAllocatedSlots > 0 && |
| 344 } | 358 (page->freelistHead || page->numUnprovisionedSlots)); |
| 345 | 359 } |
| 346 static bool ALWAYS_INLINE partitionPageStateIsFull(const PartitionPage* page) | 360 |
| 347 { | 361 static bool ALWAYS_INLINE partitionPageStateIsFull(const PartitionPage* page) { |
| 348 ASSERT(page != &PartitionRootGeneric::gSeedPage); | 362 ASSERT(page != &PartitionRootGeneric::gSeedPage); |
| 349 ASSERT(!page->pageOffset); | 363 ASSERT(!page->pageOffset); |
| 350 bool ret = (page->numAllocatedSlots == partitionBucketSlots(page->bucket)); | 364 bool ret = (page->numAllocatedSlots == partitionBucketSlots(page->bucket)); |
| 351 if (ret) { | 365 if (ret) { |
| 352 ASSERT(!page->freelistHead); | 366 ASSERT(!page->freelistHead); |
| 353 ASSERT(!page->numUnprovisionedSlots); | 367 ASSERT(!page->numUnprovisionedSlots); |
| 354 } | 368 } |
| 369 return ret; |
| 370 } |
| 371 |
| 372 static bool ALWAYS_INLINE partitionPageStateIsEmpty(const PartitionPage* page) { |
| 373 ASSERT(page != &PartitionRootGeneric::gSeedPage); |
| 374 ASSERT(!page->pageOffset); |
| 375 return (!page->numAllocatedSlots && page->freelistHead); |
| 376 } |
| 377 |
| 378 static bool ALWAYS_INLINE |
| 379 partitionPageStateIsDecommitted(const PartitionPage* page) { |
| 380 ASSERT(page != &PartitionRootGeneric::gSeedPage); |
| 381 ASSERT(!page->pageOffset); |
| 382 bool ret = (!page->numAllocatedSlots && !page->freelistHead); |
| 383 if (ret) { |
| 384 ASSERT(!page->numUnprovisionedSlots); |
| 385 ASSERT(page->emptyCacheIndex == -1); |
| 386 } |
| 387 return ret; |
| 388 } |
| 389 |
| 390 static void partitionIncreaseCommittedPages(PartitionRootBase* root, |
| 391 size_t len) { |
| 392 root->totalSizeOfCommittedPages += len; |
| 393 ASSERT(root->totalSizeOfCommittedPages <= |
| 394 root->totalSizeOfSuperPages + root->totalSizeOfDirectMappedPages); |
| 395 } |
| 396 |
| 397 static void partitionDecreaseCommittedPages(PartitionRootBase* root, |
| 398 size_t len) { |
| 399 root->totalSizeOfCommittedPages -= len; |
| 400 ASSERT(root->totalSizeOfCommittedPages <= |
| 401 root->totalSizeOfSuperPages + root->totalSizeOfDirectMappedPages); |
| 402 } |
| 403 |
| 404 static ALWAYS_INLINE void partitionDecommitSystemPages(PartitionRootBase* root, |
| 405 void* addr, |
| 406 size_t len) { |
| 407 decommitSystemPages(addr, len); |
| 408 partitionDecreaseCommittedPages(root, len); |
| 409 } |
| 410 |
| 411 static ALWAYS_INLINE void partitionRecommitSystemPages(PartitionRootBase* root, |
| 412 void* addr, |
| 413 size_t len) { |
| 414 recommitSystemPages(addr, len); |
| 415 partitionIncreaseCommittedPages(root, len); |
| 416 } |
| 417 |
| 418 static ALWAYS_INLINE void* partitionAllocPartitionPages( |
| 419 PartitionRootBase* root, |
| 420 int flags, |
| 421 uint16_t numPartitionPages) { |
| 422 ASSERT(!(reinterpret_cast<uintptr_t>(root->nextPartitionPage) % |
| 423 kPartitionPageSize)); |
| 424 ASSERT(!(reinterpret_cast<uintptr_t>(root->nextPartitionPageEnd) % |
| 425 kPartitionPageSize)); |
| 426 ASSERT(numPartitionPages <= kNumPartitionPagesPerSuperPage); |
| 427 size_t totalSize = kPartitionPageSize * numPartitionPages; |
| 428 size_t numPartitionPagesLeft = |
| 429 (root->nextPartitionPageEnd - root->nextPartitionPage) >> |
| 430 kPartitionPageShift; |
| 431 if (LIKELY(numPartitionPagesLeft >= numPartitionPages)) { |
| 432 // In this case, we can still hand out pages from the current super page |
| 433 // allocation. |
| 434 char* ret = root->nextPartitionPage; |
| 435 root->nextPartitionPage += totalSize; |
| 436 partitionIncreaseCommittedPages(root, totalSize); |
| 355 return ret; | 437 return ret; |
| 356 } | 438 } |
| 357 | 439 |
| 358 static bool ALWAYS_INLINE partitionPageStateIsEmpty(const PartitionPage* page) | 440 // Need a new super page. We want to allocate super pages in a continguous |
| 359 { | 441 // address region as much as possible. This is important for not causing |
| 360 ASSERT(page != &PartitionRootGeneric::gSeedPage); | 442 // page table bloat and not fragmenting address spaces in 32 bit architectures. |
| 361 ASSERT(!page->pageOffset); | 443 char* requestedAddress = root->nextSuperPage; |
| 362 return (!page->numAllocatedSlots && page->freelistHead); | 444 char* superPage = reinterpret_cast<char*>(allocPages( |
| 363 } | 445 requestedAddress, kSuperPageSize, kSuperPageSize, PageAccessible)); |
| 364 | 446 if (UNLIKELY(!superPage)) |
| 365 static bool ALWAYS_INLINE partitionPageStateIsDecommitted(const PartitionPage* page) | 447 return 0; |
| 366 { | 448 |
| 367 ASSERT(page != &PartitionRootGeneric::gSeedPage); | 449 root->totalSizeOfSuperPages += kSuperPageSize; |
| 368 ASSERT(!page->pageOffset); | 450 partitionIncreaseCommittedPages(root, totalSize); |
| 369 bool ret = (!page->numAllocatedSlots && !page->freelistHead); | 451 |
| 370 if (ret) { | 452 root->nextSuperPage = superPage + kSuperPageSize; |
| 371 ASSERT(!page->numUnprovisionedSlots); | 453 char* ret = superPage + kPartitionPageSize; |
| 372 ASSERT(page->emptyCacheIndex == -1); | 454 root->nextPartitionPage = ret + totalSize; |
| 373 } | 455 root->nextPartitionPageEnd = root->nextSuperPage - kPartitionPageSize; |
| 374 return ret; | 456 // Make the first partition page in the super page a guard page, but leave a |
| 375 } | 457 // hole in the middle. |
| 376 | 458 // This is where we put page metadata and also a tiny amount of extent |
| 377 static void partitionIncreaseCommittedPages(PartitionRootBase* root, size_t len) | 459 // metadata. |
| 378 { | 460 setSystemPagesInaccessible(superPage, kSystemPageSize); |
| 379 root->totalSizeOfCommittedPages += len; | 461 setSystemPagesInaccessible(superPage + (kSystemPageSize * 2), |
| 380 ASSERT(root->totalSizeOfCommittedPages <= root->totalSizeOfSuperPages + root->totalSizeOfDirectMappedPages); | 462 kPartitionPageSize - (kSystemPageSize * 2)); |
| 381 } | 463 // Also make the last partition page a guard page. |
| 382 | 464 setSystemPagesInaccessible(superPage + (kSuperPageSize - kPartitionPageSize), |
| 383 static void partitionDecreaseCommittedPages(PartitionRootBase* root, size_t len) | 465 kPartitionPageSize); |
| 384 { | 466 |
| 385 root->totalSizeOfCommittedPages -= len; | 467 // If we were after a specific address, but didn't get it, assume that |
| 386 ASSERT(root->totalSizeOfCommittedPages <= root->totalSizeOfSuperPages + root->totalSizeOfDirectMappedPages); | 468 // the system chose a lousy address. Here most OS'es have a default |
| 387 } | 469 // algorithm that isn't randomized. For example, most Linux |
| 388 | 470 // distributions will allocate the mapping directly before the last |
| 389 static ALWAYS_INLINE void partitionDecommitSystemPages(PartitionRootBase* root, void* addr, size_t len) | 471 // successful mapping, which is far from random. So we just get fresh |
| 390 { | 472 // randomness for the next mapping attempt. |
| 391 decommitSystemPages(addr, len); | 473 if (requestedAddress && requestedAddress != superPage) |
| 392 partitionDecreaseCommittedPages(root, len); | 474 root->nextSuperPage = 0; |
| 393 } | 475 |
| 394 | 476 // We allocated a new super page so update super page metadata. |
| 395 static ALWAYS_INLINE void partitionRecommitSystemPages(PartitionRootBase* root, void* addr, size_t len) | 477 // First check if this is a new extent or not. |
| 396 { | 478 PartitionSuperPageExtentEntry* latestExtent = |
| 397 recommitSystemPages(addr, len); | 479 reinterpret_cast<PartitionSuperPageExtentEntry*>( |
| 398 partitionIncreaseCommittedPages(root, len); | 480 partitionSuperPageToMetadataArea(superPage)); |
| 399 } | 481 // By storing the root in every extent metadata object, we have a fast way |
| 400 | 482 // to go from a pointer within the partition to the root object. |
| 401 static ALWAYS_INLINE void* partitionAllocPartitionPages(PartitionRootBase* root, int flags, uint16_t numPartitionPages) | 483 latestExtent->root = root; |
| 402 { | 484 // Most new extents will be part of a larger extent, and these three fields |
| 403 ASSERT(!(reinterpret_cast<uintptr_t>(root->nextPartitionPage) % kPartitionPageSize)); | 485 // are unused, but we initialize them to 0 so that we get a clear signal |
| 404 ASSERT(!(reinterpret_cast<uintptr_t>(root->nextPartitionPageEnd) % kPartitionPageSize)); | 486 // in case they are accidentally used. |
| 405 ASSERT(numPartitionPages <= kNumPartitionPagesPerSuperPage); | 487 latestExtent->superPageBase = 0; |
| 406 size_t totalSize = kPartitionPageSize * numPartitionPages; | 488 latestExtent->superPagesEnd = 0; |
| 407 size_t numPartitionPagesLeft = (root->nextPartitionPageEnd - root->nextPartitionPage) >> kPartitionPageShift; | 489 latestExtent->next = 0; |
| 408 if (LIKELY(numPartitionPagesLeft >= numPartitionPages)) { | 490 |
| 409 // In this case, we can still hand out pages from the current super page | 491 PartitionSuperPageExtentEntry* currentExtent = root->currentExtent; |
| 410 // allocation. | 492 bool isNewExtent = (superPage != requestedAddress); |
| 411 char* ret = root->nextPartitionPage; | 493 if (UNLIKELY(isNewExtent)) { |
| 412 root->nextPartitionPage += totalSize; | 494 if (UNLIKELY(!currentExtent)) { |
| 413 partitionIncreaseCommittedPages(root, totalSize); | 495 ASSERT(!root->firstExtent); |
| 414 return ret; | 496 root->firstExtent = latestExtent; |
| 415 } | |
| 416 | |
| 417 // Need a new super page. We want to allocate super pages in a continguous | |
| 418 // address region as much as possible. This is important for not causing | |
| 419 // page table bloat and not fragmenting address spaces in 32 bit architectures. | |
| 420 char* requestedAddress = root->nextSuperPage; | |
| 421 char* superPage = reinterpret_cast<char*>(allocPages(requestedAddress, kSuperPageSize, kSuperPageSize, PageAccessible)); | |
| 422 if (UNLIKELY(!superPage)) | |
| 423 return 0; | |
| 424 | |
| 425 root->totalSizeOfSuperPages += kSuperPageSize; | |
| 426 partitionIncreaseCommittedPages(root, totalSize); | |
| 427 | |
| 428 root->nextSuperPage = superPage + kSuperPageSize; | |
| 429 char* ret = superPage + kPartitionPageSize; | |
| 430 root->nextPartitionPage = ret + totalSize; | |
| 431 root->nextPartitionPageEnd = root->nextSuperPage - kPartitionPageSize; | |
| 432 // Make the first partition page in the super page a guard page, but leave a | |
| 433 // hole in the middle. | |
| 434 // This is where we put page metadata and also a tiny amount of extent | |
| 435 // metadata. | |
| 436 setSystemPagesInaccessible(superPage, kSystemPageSize); | |
| 437 setSystemPagesInaccessible(superPage + (kSystemPageSize * 2), kPartitionPageSize - (kSystemPageSize * 2)); | |
| 438 // Also make the last partition page a guard page. | |
| 439 setSystemPagesInaccessible(superPage + (kSuperPageSize - kPartitionPageSize), kPartitionPageSize); | |
| 440 | |
| 441 // If we were after a specific address, but didn't get it, assume that | |
| 442 // the system chose a lousy address. Here most OS'es have a default | |
| 443 // algorithm that isn't randomized. For example, most Linux | |
| 444 // distributions will allocate the mapping directly before the last | |
| 445 // successful mapping, which is far from random. So we just get fresh | |
| 446 // randomness for the next mapping attempt. | |
| 447 if (requestedAddress && requestedAddress != superPage) | |
| 448 root->nextSuperPage = 0; | |
| 449 | |
| 450 // We allocated a new super page so update super page metadata. | |
| 451 // First check if this is a new extent or not. | |
| 452 PartitionSuperPageExtentEntry* latestExtent = reinterpret_cast<PartitionSuperPageExtentEntry*>(partitionSuperPageToMetadataArea(superPage)); | |
| 453 // By storing the root in every extent metadata object, we have a fast way | |
| 454 // to go from a pointer within the partition to the root object. | |
| 455 latestExtent->root = root; | |
| 456 // Most new extents will be part of a larger extent, and these three fields | |
| 457 // are unused, but we initialize them to 0 so that we get a clear signal | |
| 458 // in case they are accidentally used. | |
| 459 latestExtent->superPageBase = 0; | |
| 460 latestExtent->superPagesEnd = 0; | |
| 461 latestExtent->next = 0; | |
| 462 | |
| 463 PartitionSuperPageExtentEntry* currentExtent = root->currentExtent; | |
| 464 bool isNewExtent = (superPage != requestedAddress); | |
| 465 if (UNLIKELY(isNewExtent)) { | |
| 466 if (UNLIKELY(!currentExtent)) { | |
| 467 ASSERT(!root->firstExtent); | |
| 468 root->firstExtent = latestExtent; | |
| 469 } else { | |
| 470 ASSERT(currentExtent->superPageBase); | |
| 471 currentExtent->next = latestExtent; | |
| 472 } | |
| 473 root->currentExtent = latestExtent; | |
| 474 latestExtent->superPageBase = superPage; | |
| 475 latestExtent->superPagesEnd = superPage + kSuperPageSize; | |
| 476 } else { | 497 } else { |
| 477 // We allocated next to an existing extent so just nudge the size up a little. | 498 ASSERT(currentExtent->superPageBase); |
| 478 ASSERT(currentExtent->superPagesEnd); | 499 currentExtent->next = latestExtent; |
| 479 currentExtent->superPagesEnd += kSuperPageSize; | 500 } |
| 480 ASSERT(ret >= currentExtent->superPageBase && ret < currentExtent->superPagesEnd); | 501 root->currentExtent = latestExtent; |
| 481 } | 502 latestExtent->superPageBase = superPage; |
| 482 return ret; | 503 latestExtent->superPagesEnd = superPage + kSuperPageSize; |
| 483 } | 504 } else { |
| 484 | 505 // We allocated next to an existing extent so just nudge the size up a little. |
| 485 static ALWAYS_INLINE uint16_t partitionBucketPartitionPages(const PartitionBucket* bucket) | 506 ASSERT(currentExtent->superPagesEnd); |
| 486 { | 507 currentExtent->superPagesEnd += kSuperPageSize; |
| 487 return (bucket->numSystemPagesPerSlotSpan + (kNumSystemPagesPerPartitionPage - 1)) / kNumSystemPagesPerPartitionPage; | 508 ASSERT(ret >= currentExtent->superPageBase && |
| 488 } | 509 ret < currentExtent->superPagesEnd); |
| 489 | 510 } |
| 490 static ALWAYS_INLINE void partitionPageReset(PartitionPage* page) | 511 return ret; |
| 491 { | 512 } |
| 492 ASSERT(partitionPageStateIsDecommitted(page)); | 513 |
| 493 | 514 static ALWAYS_INLINE uint16_t |
| 494 page->numUnprovisionedSlots = partitionBucketSlots(page->bucket); | 515 partitionBucketPartitionPages(const PartitionBucket* bucket) { |
| 495 ASSERT(page->numUnprovisionedSlots); | 516 return (bucket->numSystemPagesPerSlotSpan + |
| 496 | 517 (kNumSystemPagesPerPartitionPage - 1)) / |
| 497 page->nextPage = nullptr; | 518 kNumSystemPagesPerPartitionPage; |
| 498 } | 519 } |
| 499 | 520 |
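A sketch of the super page carving that partitionAllocPartitionPages performs above, using assumed example sizes (4 KiB / 16 KiB / 2 MiB); offsets only, no real mapping:

```cpp
#include <cassert>
#include <cstdint>

int main() {
  // Assumed illustrative sizes, not the shipped constants.
  const uintptr_t kSystemPageSize = 1 << 12;
  const uintptr_t kPartitionPageSize = 1 << 14;
  const uintptr_t kSuperPageSize = 1 << 21;

  uintptr_t superPage = 0x40000000;  // pretend allocPages() returned this

  // Leading guard partition page: first system page inaccessible, one
  // faulted metadata page, then inaccessible again up to the hand-out area.
  uintptr_t metadataPage = superPage + kSystemPageSize;
  uintptr_t firstUsable = superPage + kPartitionPageSize;

  // Trailing guard partition page bounds the usable range from above.
  uintptr_t usableEnd = superPage + kSuperPageSize - kPartitionPageSize;

  assert(metadataPage < firstUsable && firstUsable < usableEnd);
  // With these numbers: 126 usable partition pages out of 128.
  assert((usableEnd - firstUsable) / kPartitionPageSize == 126);
  return 0;
}
```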
| 500 static ALWAYS_INLINE void partitionPageSetup(PartitionPage* page, PartitionBucket* bucket) | 521 static ALWAYS_INLINE void partitionPageReset(PartitionPage* page) { |
| 501 { | 522 ASSERT(partitionPageStateIsDecommitted(page)); |
| 502 // The bucket never changes. We set it up once. | 523 |
| 503 page->bucket = bucket; | 524 page->numUnprovisionedSlots = partitionBucketSlots(page->bucket); |
| 504 page->emptyCacheIndex = -1; | 525 ASSERT(page->numUnprovisionedSlots); |
| 505 | 526 |
| 506 partitionPageReset(page); | 527 page->nextPage = nullptr; |
| 507 | 528 } |
| 508 // If this page has just a single slot, do not set up page offsets for any | 529 |
| 509 // page metadata other than the first one. This ensures that attempts to | 530 static ALWAYS_INLINE void partitionPageSetup(PartitionPage* page, |
| 510 // touch invalid page metadata fail. | 531 PartitionBucket* bucket) { |
| 511 if (page->numUnprovisionedSlots == 1) | 532 // The bucket never changes. We set it up once. |
| 512 return; | 533 page->bucket = bucket; |
| 513 | 534 page->emptyCacheIndex = -1; |
| 514 uint16_t numPartitionPages = partitionBucketPartitionPages(bucket); | 535 |
| 515 char* pageCharPtr = reinterpret_cast<char*>(page); | 536 partitionPageReset(page); |
| 516 for (uint16_t i = 1; i < numPartitionPages; ++i) { | 537 |
| 517 pageCharPtr += kPageMetadataSize; | 538 // If this page has just a single slot, do not set up page offsets for any |
| 518 PartitionPage* secondaryPage = reinterpret_cast<PartitionPage*>(pageCharPtr); | 539 // page metadata other than the first one. This ensures that attempts to |
| 519 secondaryPage->pageOffset = i; | 540 // touch invalid page metadata fail. |
| 520 } | 541 if (page->numUnprovisionedSlots == 1) |
| 521 } | 542 return; |
| 522 | 543 |
| 523 static ALWAYS_INLINE size_t partitionRoundUpToSystemPage(size_t size) | 544 uint16_t numPartitionPages = partitionBucketPartitionPages(bucket); |
| 524 { | 545 char* pageCharPtr = reinterpret_cast<char*>(page); |
| 525 return (size + kSystemPageOffsetMask) & kSystemPageBaseMask; | 546 for (uint16_t i = 1; i < numPartitionPages; ++i) { |
| 526 } | 547 pageCharPtr += kPageMetadataSize; |
| 527 | 548 PartitionPage* secondaryPage = |
| 528 static ALWAYS_INLINE size_t partitionRoundDownToSystemPage(size_t size) | 549 reinterpret_cast<PartitionPage*>(pageCharPtr); |
| 529 { | 550 secondaryPage->pageOffset = i; |
| 530 return size & kSystemPageBaseMask; | 551 } |
| 531 } | 552 } |
| 532 | 553 |
| 533 static ALWAYS_INLINE char* partitionPageAllocAndFillFreelist(PartitionPage* page) | 554 static ALWAYS_INLINE size_t partitionRoundUpToSystemPage(size_t size) { |
| 534 { | 555 return (size + kSystemPageOffsetMask) & kSystemPageBaseMask; |
| 535 ASSERT(page != &PartitionRootGeneric::gSeedPage); | 556 } |
| 536 uint16_t numSlots = page->numUnprovisionedSlots; | 557 |
| 537 ASSERT(numSlots); | 558 static ALWAYS_INLINE size_t partitionRoundDownToSystemPage(size_t size) { |
| 538 PartitionBucket* bucket = page->bucket; | 559 return size & kSystemPageBaseMask; |
| 539 // We should only get here when _every_ slot is either used or unprovisioned. | 560 } |
| 540 // (The third state is "on the freelist". If we have a non-empty freelist, we should not get here.) | 561 |
| 541 ASSERT(numSlots + page->numAllocatedSlots == partitionBucketSlots(bucket)); | 562 static ALWAYS_INLINE char* partitionPageAllocAndFillFreelist( |
| 542 // Similarly, make explicitly sure that the freelist is empty. | 563 PartitionPage* page) { |
| 543 ASSERT(!page->freelistHead); | 564 ASSERT(page != &PartitionRootGeneric::gSeedPage); |
| 544 ASSERT(page->numAllocatedSlots >= 0); | 565 uint16_t numSlots = page->numUnprovisionedSlots; |
| 545 | 566 ASSERT(numSlots); |
| 546 size_t size = bucket->slotSize; | 567 PartitionBucket* bucket = page->bucket; |
| 547 char* base = reinterpret_cast<char*>(partitionPageToPointer(page)); | 568 // We should only get here when _every_ slot is either used or unprovisioned. |
| 548 char* returnObject = base + (size * page->numAllocatedSlots); | 569 // (The third state is "on the freelist". If we have a non-empty freelist, we should not get here.) |
| 549 char* firstFreelistPointer = returnObject + size; | 570 ASSERT(numSlots + page->numAllocatedSlots == partitionBucketSlots(bucket)); |
| 550 char* firstFreelistPointerExtent = firstFreelistPointer + sizeof(PartitionFreelistEntry*); | 571 // Similarly, make explicitly sure that the freelist is empty. |
| 551 // Our goal is to fault as few system pages as possible. We calculate the | 572 ASSERT(!page->freelistHead); |
| 552 // page containing the "end" of the returned slot, and then allow freelist | 573 ASSERT(page->numAllocatedSlots >= 0); |
| 553 // pointers to be written up to the end of that page. | 574 |
| 554 char* subPageLimit = reinterpret_cast<char*>(partitionRoundUpToSystemPage(reinterpret_cast<size_t>(firstFreelistPointer))); | 575 size_t size = bucket->slotSize; |
| 555 char* slotsLimit = returnObject + (size * numSlots); | 576 char* base = reinterpret_cast<char*>(partitionPageToPointer(page)); |
| 556 char* freelistLimit = subPageLimit; | 577 char* returnObject = base + (size * page->numAllocatedSlots); |
| 557 if (UNLIKELY(slotsLimit < freelistLimit)) | 578 char* firstFreelistPointer = returnObject + size; |
| 558 freelistLimit = slotsLimit; | 579 char* firstFreelistPointerExtent = |
| 559 | 580 firstFreelistPointer + sizeof(PartitionFreelistEntry*); |
| 560 uint16_t numNewFreelistEntries = 0; | 581 // Our goal is to fault as few system pages as possible. We calculate the |
| 561 if (LIKELY(firstFreelistPointerExtent <= freelistLimit)) { | 582 // page containing the "end" of the returned slot, and then allow freelist |
| 562 // Only consider used space in the slot span. If we consider wasted | 583 // pointers to be written up to the end of that page. |
| 563 // space, we may get an off-by-one when a freelist pointer fits in the | 584 char* subPageLimit = reinterpret_cast<char*>(partitionRoundUpToSystemPage( |
| 564 // wasted space, but a slot does not. | 585 reinterpret_cast<size_t>(firstFreelistPointer))); |
| 565 // We know we can fit at least one freelist pointer. | 586 char* slotsLimit = returnObject + (size * numSlots); |
| 566 numNewFreelistEntries = 1; | 587 char* freelistLimit = subPageLimit; |
| 567 // Any further entries require space for the whole slot span. | 588 if (UNLIKELY(slotsLimit < freelistLimit)) |
| 568 numNewFreelistEntries += static_cast<uint16_t>((freelistLimit - firstFreelistPointerExtent) / size); | 589 freelistLimit = slotsLimit; |
| 569 } | 590 |
| 570 | 591 uint16_t numNewFreelistEntries = 0; |
| 571 // We always return an object slot -- that's the +1 below. | 592 if (LIKELY(firstFreelistPointerExtent <= freelistLimit)) { |
| 572 // We do not neccessarily create any new freelist entries, because we cross sub page boundaries frequently for large bucket sizes. | 593 // Only consider used space in the slot span. If we consider wasted |
| 573 ASSERT(numNewFreelistEntries + 1 <= numSlots); | 594 // space, we may get an off-by-one when a freelist pointer fits in the |
| 574 numSlots -= (numNewFreelistEntries + 1); | 595 // wasted space, but a slot does not. |
| 575 page->numUnprovisionedSlots = numSlots; | 596 // We know we can fit at least one freelist pointer. |
| 576 page->numAllocatedSlots++; | 597 numNewFreelistEntries = 1; |
| 577 | 598 // Any further entries require space for the whole slot span. |
| 578 if (LIKELY(numNewFreelistEntries)) { | 599 numNewFreelistEntries += static_cast<uint16_t>( |
| 579 char* freelistPointer = firstFreelistPointer; | 600 (freelistLimit - firstFreelistPointerExtent) / size); |
| 580 PartitionFreelistEntry* entry = reinterpret_cast<PartitionFreelistEntry*>(freelistPointer); | 601 } |
| 581 page->freelistHead = entry; | 602 |
| 582 while (--numNewFreelistEntries) { | 603 // We always return an object slot -- that's the +1 below. |
| 583 freelistPointer += size; | 604 // We do not neccessarily create any new freelist entries, because we cross sub page boundaries frequently for large bucket sizes. |
| 584 PartitionFreelistEntry* nextEntry = reinterpret_cast<PartitionFreelistEntry*>(freelistPointer); | 605 ASSERT(numNewFreelistEntries + 1 <= numSlots); |
| 585 entry->next = partitionFreelistMask(nextEntry); | 606 numSlots -= (numNewFreelistEntries + 1); |
| 586 entry = nextEntry; | 607 page->numUnprovisionedSlots = numSlots; |
| 587 } | 608 page->numAllocatedSlots++; |
| 588 entry->next = partitionFreelistMask(0); | 609 |
| 589 } else { | 610 if (LIKELY(numNewFreelistEntries)) { |
| 590 page->freelistHead = 0; | 611 char* freelistPointer = firstFreelistPointer; |
| 591 } | 612 PartitionFreelistEntry* entry = |
| 592 return returnObject; | 613 reinterpret_cast<PartitionFreelistEntry*>(freelistPointer); |
| 614 page->freelistHead = entry; |
| 615 while (--numNewFreelistEntries) { |
| 616 freelistPointer += size; |
| 617 PartitionFreelistEntry* nextEntry = |
| 618 reinterpret_cast<PartitionFreelistEntry*>(freelistPointer); |
| 619 entry->next = partitionFreelistMask(nextEntry); |
| 620 entry = nextEntry; |
| 621 } |
| 622 entry->next = partitionFreelistMask(0); |
| 623 } else { |
| 624 page->freelistHead = 0; |
| 625 } |
| 626 return returnObject; |
| 593 } | 627 } |
| 594 | 628 |
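The freelist threading in partitionPageAllocAndFillFreelist can be seen in isolation below; FreelistEntry and the identity freelistMask() are stand-ins (the real partitionFreelistMask() obfuscates the pointer):

```cpp
#include <cstddef>
#include <cstdio>

struct FreelistEntry { FreelistEntry* next; };
FreelistEntry* freelistMask(FreelistEntry* p) { return p; }  // stand-in

int main() {
  const size_t kSlotSize = 32;
  alignas(FreelistEntry) char slotSpan[4 * kSlotSize] = {};

  // Slot 0 is the object handed back to the caller; thread slots 1..3 into
  // a freelist, exactly as the while loop above does.
  char* freelistPointer = slotSpan + kSlotSize;
  FreelistEntry* head = reinterpret_cast<FreelistEntry*>(freelistPointer);
  FreelistEntry* entry = head;
  for (int i = 0; i < 2; ++i) {
    freelistPointer += kSlotSize;
    FreelistEntry* nextEntry = reinterpret_cast<FreelistEntry*>(freelistPointer);
    entry->next = freelistMask(nextEntry);
    entry = nextEntry;
  }
  entry->next = freelistMask(nullptr);  // terminate, as entry->next = mask(0)

  int length = 0;
  for (FreelistEntry* e = head; e; e = e->next)
    ++length;
  printf("%d\n", length);  // 3
}
```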
| 595 // This helper function scans a bucket's active page list for a suitable new | 629 // This helper function scans a bucket's active page list for a suitable new |
| 596 // active page. | 630 // active page. |
| 597 // When it finds a suitable new active page (one that has free slots and is not | 631 // When it finds a suitable new active page (one that has free slots and is not |
| 598 // empty), it is set as the new active page. If there is no suitable new | 632 // empty), it is set as the new active page. If there is no suitable new |
| 599 // active page, the current active page is set to the seed page. | 633 // active page, the current active page is set to the seed page. |
| 600 // As potential pages are scanned, they are tidied up according to their state. | 634 // As potential pages are scanned, they are tidied up according to their state. |
| 601 // Empty pages are swept on to the empty page list, decommitted pages on to the | 635 // Empty pages are swept on to the empty page list, decommitted pages on to the |
| 602 // decommitted page list and full pages are unlinked from any list. | 636 // decommitted page list and full pages are unlinked from any list. |
| 603 static bool partitionSetNewActivePage(PartitionBucket* bucket) | 637 static bool partitionSetNewActivePage(PartitionBucket* bucket) { |
| 604 { | 638 PartitionPage* page = bucket->activePagesHead; |
| 605 PartitionPage* page = bucket->activePagesHead; | 639 if (page == &PartitionRootBase::gSeedPage) |
| 606 if (page == &PartitionRootBase::gSeedPage) | |
| 607 return false; | |
| 608 | |
| 609 PartitionPage* nextPage; | |
| 610 | |
| 611 for (; page; page = nextPage) { | |
| 612 nextPage = page->nextPage; | |
| 613 ASSERT(page->bucket == bucket); | |
| 614 ASSERT(page != bucket->emptyPagesHead); | |
| 615 ASSERT(page != bucket->decommittedPagesHead); | |
| 616 | |
| 617 // Deal with empty and decommitted pages. | |
| 618 if (LIKELY(partitionPageStateIsActive(page))) { | |
| 619 // This page is usable because it has freelist entries, or has | |
| 620 // unprovisioned slots we can create freelist entries from. | |
| 621 bucket->activePagesHead = page; | |
| 622 return true; | |
| 623 } | |
| 624 if (LIKELY(partitionPageStateIsEmpty(page))) { | |
| 625 page->nextPage = bucket->emptyPagesHead; | |
| 626 bucket->emptyPagesHead = page; | |
| 627 } else if (LIKELY(partitionPageStateIsDecommitted(page))) { | |
| 628 page->nextPage = bucket->decommittedPagesHead; | |
| 629 bucket->decommittedPagesHead = page; | |
| 630 } else { | |
| 631 ASSERT(partitionPageStateIsFull(page)); | |
| 632 // If we get here, we found a full page. Skip over it too, and also | |
| 633 // tag it as full (via a negative value). We need it tagged so that | |
| 634 // free'ing can tell, and move it back into the active page list. | |
| 635 page->numAllocatedSlots = -page->numAllocatedSlots; | |
| 636 ++bucket->numFullPages; | |
| 637 // numFullPages is a uint16_t for efficient packing so guard against | |
| 638 // overflow to be safe. | |
| 639 if (UNLIKELY(!bucket->numFullPages)) | |
| 640 partitionBucketFull(); | |
| 641 // Not necessary but might help stop accidents. | |
| 642 page->nextPage = 0; | |
| 643 } | |
| 644 } | |
| 645 | |
| 646 bucket->activePagesHead = &PartitionRootGeneric::gSeedPage; | |
| 647 return false; | 640 return false; |
| 648 } | 641 |
| 649 | 642 PartitionPage* nextPage; |
| 650 static ALWAYS_INLINE PartitionDirectMapExtent* partitionPageToDirectMapExtent(PartitionPage* page) | 643 |
| 651 { | 644 for (; page; page = nextPage) { |
| 652 ASSERT(partitionBucketIsDirectMapped(page->bucket)); | 645 nextPage = page->nextPage; |
| 653 return reinterpret_cast<PartitionDirectMapExtent*>(reinterpret_cast<char*>(page) + 3 * kPageMetadataSize); | 646 ASSERT(page->bucket == bucket); |
| 654 } | 647 ASSERT(page != bucket->emptyPagesHead); |
| 655 | 648 ASSERT(page != bucket->decommittedPagesHead); |
| 656 static ALWAYS_INLINE void partitionPageSetRawSize(PartitionPage* page, size_t size) | 649 |
| 657 { | 650 // Deal with empty and decommitted pages. |
| 658 size_t* rawSizePtr = partitionPageGetRawSizePtr(page); | 651 if (LIKELY(partitionPageStateIsActive(page))) { |
| 659 if (UNLIKELY(rawSizePtr != nullptr)) | 652 // This page is usable because it has freelist entries, or has |
| 660 *rawSizePtr = size; | 653 // unprovisioned slots we can create freelist entries from. |
| 661 } | 654 bucket->activePagesHead = page; |
| 662 | 655 return true; |
| 663 static ALWAYS_INLINE PartitionPage* partitionDirectMap(PartitionRootBase* root, int flags, size_t rawSize) | 656 } |
| 664 { | 657 if (LIKELY(partitionPageStateIsEmpty(page))) { |
| 665 size_t size = partitionDirectMapSize(rawSize); | 658 page->nextPage = bucket->emptyPagesHead; |
| 666 | 659 bucket->emptyPagesHead = page; |
| 667 // Because we need to fake looking like a super page, we need to allocate | 660 } else if (LIKELY(partitionPageStateIsDecommitted(page))) { |
| 668 // a bunch of system pages more than "size": | 661 page->nextPage = bucket->decommittedPagesHead; |
| 669 // - The first few system pages are the partition page in which the super | 662 bucket->decommittedPagesHead = page; |
| 670 // page metadata is stored. We fault just one system page out of a partition | 663 } else { |
| 671 // page sized clump. | 664 ASSERT(partitionPageStateIsFull(page)); |
| 672 // - We add a trailing guard page on 32-bit (on 64-bit we rely on the | 665 // If we get here, we found a full page. Skip over it too, and also |
| 673 // massive address space plus randomization instead). | 666 // tag it as full (via a negative value). We need it tagged so that |
| 674 size_t mapSize = size + kPartitionPageSize; | 667 // free'ing can tell, and move it back into the active page list. |
| 668 page->numAllocatedSlots = -page->numAllocatedSlots; |
| 669 ++bucket->numFullPages; |
| 670 // numFullPages is a uint16_t for efficient packing so guard against |
| 671 // overflow to be safe. |
| 672 if (UNLIKELY(!bucket->numFullPages)) |
| 673 partitionBucketFull(); |
| 674 // Not necessary but might help stop accidents. |
| 675 page->nextPage = 0; |
| 676 } |
| 677 } |
| 678 |
| 679 bucket->activePagesHead = &PartitionRootGeneric::gSeedPage; |
| 680 return false; |
| 681 } |
| 682 |
| 683 static ALWAYS_INLINE PartitionDirectMapExtent* partitionPageToDirectMapExtent( |
| 684 PartitionPage* page) { |
| 685 ASSERT(partitionBucketIsDirectMapped(page->bucket)); |
| 686 return reinterpret_cast<PartitionDirectMapExtent*>( |
| 687 reinterpret_cast<char*>(page) + 3 * kPageMetadataSize); |
| 688 } |
| 689 |
| 690 static ALWAYS_INLINE void partitionPageSetRawSize(PartitionPage* page, |
| 691 size_t size) { |
| 692 size_t* rawSizePtr = partitionPageGetRawSizePtr(page); |
| 693 if (UNLIKELY(rawSizePtr != nullptr)) |
| 694 *rawSizePtr = size; |
| 695 } |
| 696 |
| 697 static ALWAYS_INLINE PartitionPage* partitionDirectMap(PartitionRootBase* root, |
| 698 int flags, |
| 699 size_t rawSize) { |
| 700 size_t size = partitionDirectMapSize(rawSize); |
| 701 |
| 702 // Because we need to fake looking like a super page, we need to allocate |
| 703 // a bunch of system pages more than "size": |
| 704 // - The first few system pages are the partition page in which the super |
| 705 // page metadata is stored. We fault just one system page out of a partition |
| 706 // page sized clump. |
| 707 // - We add a trailing guard page on 32-bit (on 64-bit we rely on the |
| 708 // massive address space plus randomization instead). |
| 709 size_t mapSize = size + kPartitionPageSize; |
| 675 #if !CPU(64BIT) | 710 #if !CPU(64BIT) |
| 676 mapSize += kSystemPageSize; | 711 mapSize += kSystemPageSize; |
| 677 #endif | 712 #endif |
| 678 // Round up to the allocation granularity. | 713 // Round up to the allocation granularity. |
| 679 mapSize += kPageAllocationGranularityOffsetMask; | 714 mapSize += kPageAllocationGranularityOffsetMask; |
| 680 mapSize &= kPageAllocationGranularityBaseMask; | 715 mapSize &= kPageAllocationGranularityBaseMask; |
| 681 | 716 |
| 682 // TODO: these pages will be zero-filled. Consider internalizing an | 717 // TODO: these pages will be zero-filled. Consider internalizing an |
| 683 // allocZeroed() API so we can avoid a memset() entirely in this case. | 718 // allocZeroed() API so we can avoid a memset() entirely in this case. |
| 684 char* ptr = reinterpret_cast<char*>(allocPages(0, mapSize, kSuperPageSize, PageAccessible)); | 719 char* ptr = reinterpret_cast<char*>( |
| 685 if (UNLIKELY(!ptr)) | 720 allocPages(0, mapSize, kSuperPageSize, PageAccessible)); |
| 721 if (UNLIKELY(!ptr)) |
| 722 return nullptr; |
| 723 |
| 724 size_t committedPageSize = size + kSystemPageSize; |
| 725 root->totalSizeOfDirectMappedPages += committedPageSize; |
| 726 partitionIncreaseCommittedPages(root, committedPageSize); |
| 727 |
| 728 char* slot = ptr + kPartitionPageSize; |
| 729 setSystemPagesInaccessible(ptr + (kSystemPageSize * 2), |
| 730 kPartitionPageSize - (kSystemPageSize * 2)); |
| 731 #if !CPU(64BIT) |
| 732 setSystemPagesInaccessible(ptr, kSystemPageSize); |
| 733 setSystemPagesInaccessible(slot + size, kSystemPageSize); |
| 734 #endif |
| 735 |
| 736 PartitionSuperPageExtentEntry* extent = |
| 737 reinterpret_cast<PartitionSuperPageExtentEntry*>( |
| 738 partitionSuperPageToMetadataArea(ptr)); |
| 739 extent->root = root; |
| 740 // The new structures are all located inside a fresh system page so they |
| 741 // will all be zeroed out. These ASSERTs are for documentation. |
| 742 ASSERT(!extent->superPageBase); |
| 743 ASSERT(!extent->superPagesEnd); |
| 744 ASSERT(!extent->next); |
| 745 PartitionPage* page = partitionPointerToPageNoAlignmentCheck(slot); |
| 746 PartitionBucket* bucket = reinterpret_cast<PartitionBucket*>( |
| 747 reinterpret_cast<char*>(page) + (kPageMetadataSize * 2)); |
| 748 ASSERT(!page->nextPage); |
| 749 ASSERT(!page->numAllocatedSlots); |
| 750 ASSERT(!page->numUnprovisionedSlots); |
| 751 ASSERT(!page->pageOffset); |
| 752 ASSERT(!page->emptyCacheIndex); |
| 753 page->bucket = bucket; |
| 754 page->freelistHead = reinterpret_cast<PartitionFreelistEntry*>(slot); |
| 755 PartitionFreelistEntry* nextEntry = |
| 756 reinterpret_cast<PartitionFreelistEntry*>(slot); |
| 757 nextEntry->next = partitionFreelistMask(0); |
| 758 |
| 759 ASSERT(!bucket->activePagesHead); |
| 760 ASSERT(!bucket->emptyPagesHead); |
| 761 ASSERT(!bucket->decommittedPagesHead); |
| 762 ASSERT(!bucket->numSystemPagesPerSlotSpan); |
| 763 ASSERT(!bucket->numFullPages); |
| 764 bucket->slotSize = size; |
| 765 |
| 766 PartitionDirectMapExtent* mapExtent = partitionPageToDirectMapExtent(page); |
| 767 mapExtent->mapSize = mapSize - kPartitionPageSize - kSystemPageSize; |
| 768 mapExtent->bucket = bucket; |
| 769 |
| 770 // Maintain the doubly-linked list of all direct mappings. |
| 771 mapExtent->nextExtent = root->directMapList; |
| 772 if (mapExtent->nextExtent) |
| 773 mapExtent->nextExtent->prevExtent = mapExtent; |
| 774 mapExtent->prevExtent = nullptr; |
| 775 root->directMapList = mapExtent; |
| 776 |
| 777 return page; |
| 778 } |
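The comment block in partitionDirectMap above describes the extra pages a direct map reserves around the payload; the size arithmetic in isolation looks like this (the constants are stand-ins for the real values in the config headers):

```cpp
#include <cstddef>
#include <cstdio>

// Stand-in constants; the real ones come from wtf/PartitionAlloc.h.
constexpr size_t kSysPage = 4096;
constexpr size_t kPartPage = 16384;
constexpr size_t kGranularityOffsetMask = (1 << 16) - 1;  // 64 KiB granularity

size_t directMapReservation(size_t size, bool is32Bit) {
  size_t mapSize = size + kPartPage;   // leading metadata partition page
  if (is32Bit)
    mapSize += kSysPage;               // trailing guard page on 32-bit only
  mapSize += kGranularityOffsetMask;   // round up to the allocation
  mapSize &= ~kGranularityOffsetMask;  // granularity
  return mapSize;
}

int main() {
  printf("%zu\n", directMapReservation(1 << 20, true));  // 1 MiB payload
  return 0;
}
```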
| 779 |
| 780 static ALWAYS_INLINE void partitionDirectUnmap(PartitionPage* page) { |
| 781 PartitionRootBase* root = partitionPageToRoot(page); |
| 782 const PartitionDirectMapExtent* extent = partitionPageToDirectMapExtent(page); |
| 783 size_t unmapSize = extent->mapSize; |
| 784 |
| 785 // Maintain the doubly-linked list of all direct mappings. |
| 786 if (extent->prevExtent) { |
| 787 ASSERT(extent->prevExtent->nextExtent == extent); |
| 788 extent->prevExtent->nextExtent = extent->nextExtent; |
| 789 } else { |
| 790 root->directMapList = extent->nextExtent; |
| 791 } |
| 792 if (extent->nextExtent) { |
| 793 ASSERT(extent->nextExtent->prevExtent == extent); |
| 794 extent->nextExtent->prevExtent = extent->prevExtent; |
| 795 } |
| 796 |
| 797 // Add on the size of the trailing guard page and preceding partition |
| 798 // page. |
| 799 unmapSize += kPartitionPageSize + kSystemPageSize; |
| 800 |
| 801 size_t uncommittedPageSize = page->bucket->slotSize + kSystemPageSize; |
| 802 partitionDecreaseCommittedPages(root, uncommittedPageSize); |
| 803 ASSERT(root->totalSizeOfDirectMappedPages >= uncommittedPageSize); |
| 804 root->totalSizeOfDirectMappedPages -= uncommittedPageSize; |
| 805 |
| 806 ASSERT(!(unmapSize & kPageAllocationGranularityOffsetMask)); |
| 807 |
| 808 char* ptr = reinterpret_cast<char*>(partitionPageToPointer(page)); |
| 809 // Account for the mapping starting a partition page before the actual |
| 810 // allocation address. |
| 811 ptr -= kPartitionPageSize; |
| 812 |
| 813 freePages(ptr, unmapSize); |
| 814 } |
| 815 |
| 816 void* partitionAllocSlowPath(PartitionRootBase* root, |
| 817 int flags, |
| 818 size_t size, |
| 819 PartitionBucket* bucket) { |
| 820 // The slow path is called when the freelist is empty. |
| 821 ASSERT(!bucket->activePagesHead->freelistHead); |
| 822 |
| 823 PartitionPage* newPage = nullptr; |
| 824 |
| 825 // For the partitionAllocGeneric API, we have a bunch of buckets marked |
| 826 // as special cases. We bounce them through to the slow path so that we |
| 827 // can still have a blazing fast hot path due to lack of corner-case |
| 828 // branches. |
| 829 bool returnNull = flags & PartitionAllocReturnNull; |
| 830 if (UNLIKELY(partitionBucketIsDirectMapped(bucket))) { |
| 831 ASSERT(size > kGenericMaxBucketed); |
| 832 ASSERT(bucket == &PartitionRootBase::gPagedBucket); |
| 833 ASSERT(bucket->activePagesHead == &PartitionRootGeneric::gSeedPage); |
| 834 if (size > kGenericMaxDirectMapped) { |
| 835 if (returnNull) |
| 686 return nullptr; | 836 return nullptr; |
| 687 | 837 partitionExcessiveAllocationSize(); |
| 688 size_t committedPageSize = size + kSystemPageSize; | 838 } |
| 689 root->totalSizeOfDirectMappedPages += committedPageSize; | 839 newPage = partitionDirectMap(root, flags, size); |
| 690 partitionIncreaseCommittedPages(root, committedPageSize); | 840 } else if (LIKELY(partitionSetNewActivePage(bucket))) { |
| 691 | 841 // First, did we find an active page in the active pages list? |
| 692 char* slot = ptr + kPartitionPageSize; | 842 newPage = bucket->activePagesHead; |
| 693 setSystemPagesInaccessible(ptr + (kSystemPageSize * 2), kPartitionPageSize - (kSystemPageSize * 2)); | 843 ASSERT(partitionPageStateIsActive(newPage)); |
| 694 #if !CPU(64BIT) | 844 } else if (LIKELY(bucket->emptyPagesHead != nullptr) || |
| 695 setSystemPagesInaccessible(ptr, kSystemPageSize); | 845 LIKELY(bucket->decommittedPagesHead != nullptr)) { |
| 696 setSystemPagesInaccessible(slot + size, kSystemPageSize); | 846 // Second, look in our lists of empty and decommitted pages. |
| 697 #endif | 847 // Check empty pages first, which are preferred, but beware that an |
| 698 | 848 // empty page might have been decommitted. |
| 699 PartitionSuperPageExtentEntry* extent = reinterpret_cast<PartitionSuperPageExtentEntry*>(partitionSuperPageToMetadataArea(ptr)); | 849 while (LIKELY((newPage = bucket->emptyPagesHead) != nullptr)) { |
| 700 extent->root = root; | 850 ASSERT(newPage->bucket == bucket); |
| 701 // The new structures are all located inside a fresh system page so they | 851 ASSERT(partitionPageStateIsEmpty(newPage) || |
| 702 // will all be zeroed out. These ASSERTs are for documentation. | 852 partitionPageStateIsDecommitted(newPage)); |
| 703 ASSERT(!extent->superPageBase); | 853 bucket->emptyPagesHead = newPage->nextPage; |
| 704 ASSERT(!extent->superPagesEnd); | 854 // Accept the empty page unless it got decommitted. |
| 705 ASSERT(!extent->next); | 855 if (newPage->freelistHead) { |
| 706 PartitionPage* page = partitionPointerToPageNoAlignmentCheck(slot); | 856 newPage->nextPage = nullptr; |
| 707 PartitionBucket* bucket = reinterpret_cast<PartitionBucket*>(reinterpret_cast<char*>(page) + (kPageMetadataSize * 2)); | 857 break; |
| 708 ASSERT(!page->nextPage); | 858 } |
| 709 ASSERT(!page->numAllocatedSlots); | 859 ASSERT(partitionPageStateIsDecommitted(newPage)); |
| 710 ASSERT(!page->numUnprovisionedSlots); | 860 newPage->nextPage = bucket->decommittedPagesHead; |
| 711 ASSERT(!page->pageOffset); | 861 bucket->decommittedPagesHead = newPage; |
| 712 ASSERT(!page->emptyCacheIndex); | 862 } |
| 713 page->bucket = bucket; | 863 if (UNLIKELY(!newPage) && LIKELY(bucket->decommittedPagesHead != nullptr)) { |
| 714 page->freelistHead = reinterpret_cast<PartitionFreelistEntry*>(slot); | 864 newPage = bucket->decommittedPagesHead; |
| 715 PartitionFreelistEntry* nextEntry = reinterpret_cast<PartitionFreelistEntry*>(slot); | 865 ASSERT(newPage->bucket == bucket); |
| 716 nextEntry->next = partitionFreelistMask(0); | 866 ASSERT(partitionPageStateIsDecommitted(newPage)); |
| 717 | 867 bucket->decommittedPagesHead = newPage->nextPage; |
| 718 ASSERT(!bucket->activePagesHead); | 868 void* addr = partitionPageToPointer(newPage); |
| 719 ASSERT(!bucket->emptyPagesHead); | 869 partitionRecommitSystemPages(root, addr, |
| 720 ASSERT(!bucket->decommittedPagesHead); | 870 partitionBucketBytes(newPage->bucket)); |
| 721 ASSERT(!bucket->numSystemPagesPerSlotSpan); | 871 partitionPageReset(newPage); |
| 722 ASSERT(!bucket->numFullPages); | 872 } |
| 723 bucket->slotSize = size; | 873 ASSERT(newPage); |
| 724 | 874 } else { |
| 725 PartitionDirectMapExtent* mapExtent = partitionPageToDirectMapExtent(page); | 875 // Third. If we get here, we need a brand new page. |
| 726 mapExtent->mapSize = mapSize - kPartitionPageSize - kSystemPageSize; | 876 uint16_t numPartitionPages = partitionBucketPartitionPages(bucket); |
| 727 mapExtent->bucket = bucket; | 877 void* rawPages = |
| 728 | 878 partitionAllocPartitionPages(root, flags, numPartitionPages); |
| 729 // Maintain the doubly-linked list of all direct mappings. | 879 if (LIKELY(rawPages != nullptr)) { |
| 730 mapExtent->nextExtent = root->directMapList; | 880 newPage = partitionPointerToPageNoAlignmentCheck(rawPages); |
| 731 if (mapExtent->nextExtent) | 881 partitionPageSetup(newPage, bucket); |
| 732 mapExtent->nextExtent->prevExtent = mapExtent; | 882 } |
| 733 mapExtent->prevExtent = nullptr; | 883 } |
| 734 root->directMapList = mapExtent; | 884 |
| 735 | 885 // Bail if we had a memory allocation failure. |
| 736 return page; | 886 if (UNLIKELY(!newPage)) { |
| 737 } | 887 ASSERT(bucket->activePagesHead == &PartitionRootGeneric::gSeedPage); |
| 738 | 888 if (returnNull) |
| 739 static ALWAYS_INLINE void partitionDirectUnmap(PartitionPage* page) | 889 return nullptr; |
| 740 { | 890 partitionOutOfMemory(root); |
| 741 PartitionRootBase* root = partitionPageToRoot(page); | 891 } |
| 742 const PartitionDirectMapExtent* extent = partitionPageToDirectMapExtent(page); | 892 |
| 743 size_t unmapSize = extent->mapSize; | 893 bucket = newPage->bucket; |
| 744 | 894 ASSERT(bucket != &PartitionRootBase::gPagedBucket); |
| 745 // Maintain the doubly-linked list of all direct mappings. | 895 bucket->activePagesHead = newPage; |
| 746 if (extent->prevExtent) { | 896 partitionPageSetRawSize(newPage, size); |
| 747 ASSERT(extent->prevExtent->nextExtent == extent); | 897 |
| 748 extent->prevExtent->nextExtent = extent->nextExtent; | 898 // If we found an active page with free slots, or an empty page, we have a |
| 749 } else { | 899 // usable freelist head. |
| 750 root->directMapList = extent->nextExtent; | 900 if (LIKELY(newPage->freelistHead != nullptr)) { |
| 751 } | 901 PartitionFreelistEntry* entry = newPage->freelistHead; |
| 752 if (extent->nextExtent) { | 902 PartitionFreelistEntry* newHead = partitionFreelistMask(entry->next); |
| 753 ASSERT(extent->nextExtent->prevExtent == extent); | 903 newPage->freelistHead = newHead; |
| 754 extent->nextExtent->prevExtent = extent->prevExtent; | 904 newPage->numAllocatedSlots++; |
| 755 } | 905 return entry; |
| 756 | 906 } |
| 757 // Add on the size of the trailing guard page and preceding partition | 907 // Otherwise, we need to build the freelist. |
| 758 // page. | 908 ASSERT(newPage->numUnprovisionedSlots); |
| 759 unmapSize += kPartitionPageSize + kSystemPageSize; | 909 return partitionPageAllocAndFillFreelist(newPage); |
| 760 | 910 } |
| 761 size_t uncommittedPageSize = page->bucket->slotSize + kSystemPageSize; | 911 |
| 762 partitionDecreaseCommittedPages(root, uncommittedPageSize); | 912 static ALWAYS_INLINE void partitionDecommitPage(PartitionRootBase* root, |
| 763 ASSERT(root->totalSizeOfDirectMappedPages >= uncommittedPageSize); | 913 PartitionPage* page) { |
| 764 root->totalSizeOfDirectMappedPages -= uncommittedPageSize; | 914 ASSERT(partitionPageStateIsEmpty(page)); |
| 765 | 915 ASSERT(!partitionBucketIsDirectMapped(page->bucket)); |
| 766 ASSERT(!(unmapSize & kPageAllocationGranularityOffsetMask)); | 916 void* addr = partitionPageToPointer(page); |
| 767 | 917 partitionDecommitSystemPages(root, addr, partitionBucketBytes(page->bucket)); |
| 768 char* ptr = reinterpret_cast<char*>(partitionPageToPointer(page)); | 918 |
| 769 // Account for the mapping starting a partition page before the actual | 919 // We actually leave the decommitted page in the active list. We'll sweep |
| 770 // allocation address. | 920 // it on to the decommitted page list when we next walk the active page |
| 771 ptr -= kPartitionPageSize; | 921 // list. |
| 772 | 922 // Pulling this trick enables us to use a singly-linked page list for all |
| 773 freePages(ptr, unmapSize); | 923 // cases, which is critical in keeping the page metadata structure down to |
| 774 } | 924 // 32 bytes in size. |
| 775 | 925 page->freelistHead = 0; |
| 776 void* partitionAllocSlowPath(PartitionRootBase* root, int flags, size_t size, PartitionBucket* bucket) | 926 page->numUnprovisionedSlots = 0; |
| 777 { | 927 ASSERT(partitionPageStateIsDecommitted(page)); |
| 778 // The slow path is called when the freelist is empty. | 928 } |
| 779 ASSERT(!bucket->activePagesHead->freelistHead); | 929 |
| 780 | 930 static void partitionDecommitPageIfPossible(PartitionRootBase* root, |
| 781 PartitionPage* newPage = nullptr; | 931 PartitionPage* page) { |
| 782 | 932 ASSERT(page->emptyCacheIndex >= 0); |
| 783 // For the partitionAllocGeneric API, we have a bunch of buckets marked | 933 ASSERT(static_cast<unsigned>(page->emptyCacheIndex) < kMaxFreeableSpans); |
| 784 // as special cases. We bounce them through to the slow path so that we | 934 ASSERT(page == root->globalEmptyPageRing[page->emptyCacheIndex]); |
| 785 // can still have a blazing fast hot path due to lack of corner-case | 935 page->emptyCacheIndex = -1; |
| 786 // branches. | 936 if (partitionPageStateIsEmpty(page)) |
| 787 bool returnNull = flags & PartitionAllocReturnNull; | 937 partitionDecommitPage(root, page); |
| 788 if (UNLIKELY(partitionBucketIsDirectMapped(bucket))) { | 938 } |
| 789 ASSERT(size > kGenericMaxBucketed); | 939 |
| 790 ASSERT(bucket == &PartitionRootBase::gPagedBucket); | 940 static ALWAYS_INLINE void partitionRegisterEmptyPage(PartitionPage* page) { |
| 791 ASSERT(bucket->activePagesHead == &PartitionRootGeneric::gSeedPage); | 941 ASSERT(partitionPageStateIsEmpty(page)); |
| 792 if (size > kGenericMaxDirectMapped) { | 942 PartitionRootBase* root = partitionPageToRoot(page); |
| 793 if (returnNull) | 943 |
| 794 return nullptr; | 944 // If the page is already registered as empty, give it another life. |
| 795 partitionExcessiveAllocationSize(); | 945 if (page->emptyCacheIndex != -1) { |
| 796 } | |
| 797 newPage = partitionDirectMap(root, flags, size); | |
| 798 } else if (LIKELY(partitionSetNewActivePage(bucket))) { | |
| 799 // First, did we find an active page in the active pages list? | |
| 800 newPage = bucket->activePagesHead; | |
| 801 ASSERT(partitionPageStateIsActive(newPage)); | |
| 802 } else if (LIKELY(bucket->emptyPagesHead != nullptr) || LIKELY(bucket->decommittedPagesHead != nullptr)) { | |
| 803 // Second, look in our lists of empty and decommitted pages. | |
| 804 // Check empty pages first, which are preferred, but beware that an | |
| 805 // empty page might have been decommitted. | |
| 806 while (LIKELY((newPage = bucket->emptyPagesHead) != nullptr)) { | |
| 807 ASSERT(newPage->bucket == bucket); | |
| 808 ASSERT(partitionPageStateIsEmpty(newPage) || partitionPageStateIsDecommitted(newPage)); | |
| 809 bucket->emptyPagesHead = newPage->nextPage; | |
| 810 // Accept the empty page unless it got decommitted. | |
| 811 if (newPage->freelistHead) { | |
| 812 newPage->nextPage = nullptr; | |
| 813 break; | |
| 814 } | |
| 815 ASSERT(partitionPageStateIsDecommitted(newPage)); | |
| 816 newPage->nextPage = bucket->decommittedPagesHead; | |
| 817 bucket->decommittedPagesHead = newPage; | |
| 818 } | |
| 819 if (UNLIKELY(!newPage) && LIKELY(bucket->decommittedPagesHead != nullptr)) { | |
| 820 newPage = bucket->decommittedPagesHead; | |
| 821 ASSERT(newPage->bucket == bucket); | |
| 822 ASSERT(partitionPageStateIsDecommitted(newPage)); | |
| 823 bucket->decommittedPagesHead = newPage->nextPage; | |
| 824 void* addr = partitionPageToPointer(newPage); | |
| 825 partitionRecommitSystemPages(root, addr, partitionBucketBytes(newPage->bucket)); | |
| 826 partitionPageReset(newPage); | |
| 827 } | |
| 828 ASSERT(newPage); | |
| 829 } else { | |
| 830 // Third. If we get here, we need a brand new page. | |
| 831 uint16_t numPartitionPages = partitionBucketPartitionPages(bucket); | |
| 832 void* rawPages = partitionAllocPartitionPages(root, flags, numPartitionPages); | |
| 833 if (LIKELY(rawPages != nullptr)) { | |
| 834 newPage = partitionPointerToPageNoAlignmentCheck(rawPages); | |
| 835 partitionPageSetup(newPage, bucket); | |
| 836 } | |
| 837 } | |
| 838 | |
| 839 // Bail if we had a memory allocation failure. | |
| 840 if (UNLIKELY(!newPage)) { | |
| 841 ASSERT(bucket->activePagesHead == &PartitionRootGeneric::gSeedPage); | |
| 842 if (returnNull) | |
| 843 return nullptr; | |
| 844 partitionOutOfMemory(root); | |
| 845 } | |
| 846 | |
| 847 bucket = newPage->bucket; | |
| 848 ASSERT(bucket != &PartitionRootBase::gPagedBucket); | |
| 849 bucket->activePagesHead = newPage; | |
| 850 partitionPageSetRawSize(newPage, size); | |
| 851 | |
| 852 // If we found an active page with free slots, or an empty page, we have a | |
| 853 // usable freelist head. | |
| 854 if (LIKELY(newPage->freelistHead != nullptr)) { | |
| 855 PartitionFreelistEntry* entry = newPage->freelistHead; | |
| 856 PartitionFreelistEntry* newHead = partitionFreelistMask(entry->next); | |
| 857 newPage->freelistHead = newHead; | |
| 858 newPage->numAllocatedSlots++; | |
| 859 return entry; | |
| 860 } | |
| 861 // Otherwise, we need to build the freelist. | |
| 862 ASSERT(newPage->numUnprovisionedSlots); | |
| 863 return partitionPageAllocAndFillFreelist(newPage); | |
| 864 } | |
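The slow path above tries its allocation sources in a fixed order; stripped of the direct-map and out-of-memory branches it reduces to this shape (a sketch over placeholder types, not the real API — the three helpers stand in for partitionSetNewActivePage, the empty/decommitted list scans, and partitionAllocPartitionPages):

```cpp
// Sketch of the ordering only; Bucket must provide the three helpers named
// below, which are hypothetical stand-ins for the real functions.
template <typename Bucket, typename Page>
Page* findPageForAllocation(Bucket& bucket) {
  if (Page* page = bucket.takeUsableActivePage())    // 1. active page list
    return page;
  if (Page* page = bucket.takeEmptyOrDecommitted())  // 2. recycle a span,
    return page;                                     //    recommitting if needed
  return bucket.allocateFreshSlotSpan();             // 3. brand new pages
}
```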
| 865 | |
| 866 static ALWAYS_INLINE void partitionDecommitPage(PartitionRootBase* root, PartitionPage* page) | 912 static ALWAYS_INLINE void partitionDecommitPage(PartitionRootBase* root, |
| 867 { | |
| 868 ASSERT(partitionPageStateIsEmpty(page)); | |
| 869 ASSERT(!partitionBucketIsDirectMapped(page->bucket)); | |
| 870 void* addr = partitionPageToPointer(page); | |
| 871 partitionDecommitSystemPages(root, addr, partitionBucketBytes(page->bucket)); | 917 partitionDecommitSystemPages(root, addr, partitionBucketBytes(page->bucket)); |
| 872 | |
| 873 // We actually leave the decommitted page in the active list. We'll sweep | |
| 874 // it on to the decommitted page list when we next walk the active page | |
| 875 // list. | |
| 876 // Pulling this trick enables us to use a singly-linked page list for all | |
| 877 // cases, which is critical in keeping the page metadata structure down to | |
| 878 // 32 bytes in size. | |
| 879 page->freelistHead = 0; | |
| 880 page->numUnprovisionedSlots = 0; | |
| 881 ASSERT(partitionPageStateIsDecommitted(page)); | |
| 882 } | |
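partitionDecommitSystemPages returns the physical pages to the OS while keeping the virtual range reserved. On Linux one plausible implementation of that primitive is madvise — an assumption for illustration; the real code lives in the platform page allocator:

```cpp
#include <sys/mman.h>
#include <cassert>
#include <cstddef>

// Assumed POSIX stand-in for decommitSystemPages: for a private anonymous
// mapping, MADV_DONTNEED drops the backing pages; a later touch faults in
// fresh zeroed memory, matching the "decommitted" state above.
void decommitRange(void* addr, size_t len) {
  int rc = madvise(addr, len, MADV_DONTNEED);
  assert(rc == 0);
  (void)rc;
}
```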
| 883 | |
| 884 static void partitionDecommitPageIfPossible(PartitionRootBase* root, PartitionPage* page) | 930 static void partitionDecommitPageIfPossible(PartitionRootBase* root, |
| 885 { | |
| 886 ASSERT(page->emptyCacheIndex >= 0); | 946 ASSERT(page->emptyCacheIndex >= 0); |
| 887 ASSERT(static_cast<unsigned>(page->emptyCacheIndex) < kMaxFreeableSpans); | 947 ASSERT(static_cast<unsigned>(page->emptyCacheIndex) < kMaxFreeableSpans); |
| 888 ASSERT(page == root->globalEmptyPageRing[page->emptyCacheIndex]); | 948 ASSERT(root->globalEmptyPageRing[page->emptyCacheIndex] == page); |
| 889 page->emptyCacheIndex = -1; | 949 root->globalEmptyPageRing[page->emptyCacheIndex] = 0; |
| 890 if (partitionPageStateIsEmpty(page)) | 950 } |
| 891 partitionDecommitPage(root, page); | 951 |
| 892 } | 952 int16_t currentIndex = root->globalEmptyPageRingIndex; |
| 893 | 953 PartitionPage* pageToDecommit = root->globalEmptyPageRing[currentIndex]; |
| 894 static ALWAYS_INLINE void partitionRegisterEmptyPage(PartitionPage* page) | 954 // The page might well have been re-activated, filled up, etc. before we get |
| 895 { | 955 // around to looking at it here. |
| 896 ASSERT(partitionPageStateIsEmpty(page)); | 956 if (pageToDecommit) |
| 897 PartitionRootBase* root = partitionPageToRoot(page); | 957 partitionDecommitPageIfPossible(root, pageToDecommit); |
| 898 | 958 |
| 899 // If the page is already registered as empty, give it another life. | 959 // We put the empty slot span on our global list of "pages that were once |
| 900 if (page->emptyCacheIndex != -1) { | 960 // empty". thus providing it a bit of breathing room to get re-used before |
| 901 ASSERT(page->emptyCacheIndex >= 0); | 961 // we really free it. This improves performance, particularly on Mac OS X |
| 902 ASSERT(static_cast<unsigned>(page->emptyCacheIndex) < kMaxFreeableSpans); | 947 ASSERT(static_cast<unsigned>(page->emptyCacheIndex) < kMaxFreeableSpans); |
| 903 ASSERT(root->globalEmptyPageRing[page->emptyCacheIndex] == page); | 963 root->globalEmptyPageRing[currentIndex] = page; |
| 904 root->globalEmptyPageRing[page->emptyCacheIndex] = 0; | 964 page->emptyCacheIndex = currentIndex; |
| 905 } | 965 ++currentIndex; |
| 906 | 966 if (currentIndex == kMaxFreeableSpans) |
| 907 int16_t currentIndex = root->globalEmptyPageRingIndex; | 967 currentIndex = 0; |
| 908 PartitionPage* pageToDecommit = root->globalEmptyPageRing[currentIndex]; | 968 root->globalEmptyPageRingIndex = currentIndex; |
| 909 // The page might well have been re-activated, filled up, etc. before we get | 969 } |
| 910 // around to looking at it here. | 970 |
| 911 if (pageToDecommit) | 971 static void partitionDecommitEmptyPages(PartitionRootBase* root) { |
| 912 partitionDecommitPageIfPossible(root, pageToDecommit); | 972 for (size_t i = 0; i < kMaxFreeableSpans; ++i) { |
| 913 | 973 PartitionPage* page = root->globalEmptyPageRing[i]; |
| 914 // We put the empty slot span on our global list of "pages that were once | 974 if (page) |
| 915 // empty". thus providing it a bit of breathing room to get re-used before | 975 partitionDecommitPageIfPossible(root, page); |
| 916 // we really free it. This improves performance, particularly on Mac OS X | 976 root->globalEmptyPageRing[i] = nullptr; |
| 917 // which has subpar memory management performance. | 977 } |
| 918 root->globalEmptyPageRing[currentIndex] = page; | 978 } |
| 919 page->emptyCacheIndex = currentIndex; | 979 |
| 920 ++currentIndex; | 980 void partitionFreeSlowPath(PartitionPage* page) { |
| 921 if (currentIndex == kMaxFreeableSpans) | 981 PartitionBucket* bucket = page->bucket; |
| 922 currentIndex = 0; | 982 ASSERT(page != &PartitionRootGeneric::gSeedPage); |
| 923 root->globalEmptyPageRingIndex = currentIndex; | 983 if (LIKELY(page->numAllocatedSlots == 0)) { |
| 924 } | 984 // Page became fully unused. |
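The global empty-page ring above is a fixed-capacity circular buffer where inserting overwrites (and thereby decommits) the oldest entry. The same structure in miniature — the type and names are illustrative, not the real ones:

```cpp
#include <array>
#include <cstddef>

template <typename Page, size_t N>
class EmptyPageRing {
 public:
  // Stores |page| in the next slot; returns the evicted occupant (if any)
  // so the caller can try to decommit it, mirroring the code above.
  Page* push(Page* page) {
    Page* evicted = ring_[index_];
    ring_[index_] = page;
    index_ = (index_ + 1) % N;
    return evicted;
  }

 private:
  std::array<Page*, N> ring_{};
  size_t index_ = 0;
};
```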
| 925 | 985 if (UNLIKELY(partitionBucketIsDirectMapped(bucket))) { |
| 926 static void partitionDecommitEmptyPages(PartitionRootBase* root) | 986 partitionDirectUnmap(page); |
| 927 { | 987 return; |
| 928 for (size_t i = 0; i < kMaxFreeableSpans; ++i) { | 988 } |
| 929 PartitionPage* page = root->globalEmptyPageRing[i]; | 989 // If it's the current active page, change it. We bounce the page to |
| 930 if (page) | 990 // the empty list as a force towards defragmentation. |
| 931 partitionDecommitPageIfPossible(root, page); | 991 if (LIKELY(page == bucket->activePagesHead)) |
| 932 root->globalEmptyPageRing[i] = nullptr; | 992 (void)partitionSetNewActivePage(bucket); |
| 933 } | 993 ASSERT(bucket->activePagesHead != page); |
| 934 } | 994 |
| 935 | 995 partitionPageSetRawSize(page, 0); |
| 936 void partitionFreeSlowPath(PartitionPage* page) | 996 ASSERT(!partitionPageGetRawSize(page)); |
| 937 { | 997 |
| 938 PartitionBucket* bucket = page->bucket; | 998 partitionRegisterEmptyPage(page); |
| 939 ASSERT(page != &PartitionRootGeneric::gSeedPage); | 999 } else { |
| 940 if (LIKELY(page->numAllocatedSlots == 0)) { | 1000 ASSERT(!partitionBucketIsDirectMapped(bucket)); |
| 941 // Page became fully unused. | 1001 // Ensure that the page is full. That's the only valid case if we |
| 942 if (UNLIKELY(partitionBucketIsDirectMapped(bucket))) { | 1002 // arrive here. |
| 943 partitionDirectUnmap(page); | 1003 ASSERT(page->numAllocatedSlots < 0); |
| 944 return; | 1004 // A transition of numAllocatedSlots from 0 to -1 is not legal, and |
| 945 } | 1005 // likely indicates a double-free. |
| 946 // If it's the current active page, change it. We bounce the page to | 1006 RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(page->numAllocatedSlots != -1); |
| 947 // the empty list as a force towards defragmentation. | 1007 page->numAllocatedSlots = -page->numAllocatedSlots - 2; |
| 948 if (LIKELY(page == bucket->activePagesHead)) | 1008 ASSERT(page->numAllocatedSlots == partitionBucketSlots(bucket) - 1); |
| 949 (void) partitionSetNewActivePage(bucket); | 1009 // Fully used page became partially used. It must be put back on the |
| 950 ASSERT(bucket->activePagesHead != page); | 1010 // non-full page list. Also make it the current page to increase the |
| 951 | 1011 // chances of it being filled up again. The old current page will be |
| 952 partitionPageSetRawSize(page, 0); | 1012 // the next page. |
| 953 ASSERT(!partitionPageGetRawSize(page)); | 1013 ASSERT(!page->nextPage); |
| 954 | 1014 if (LIKELY(bucket->activePagesHead != &PartitionRootGeneric::gSeedPage)) |
| 955 partitionRegisterEmptyPage(page); | 1015 page->nextPage = bucket->activePagesHead; |
| 956 } else { | 1016 bucket->activePagesHead = page; |
| 957 ASSERT(!partitionBucketIsDirectMapped(bucket)); | 1017 --bucket->numFullPages; |
| 958 // Ensure that the page is full. That's the only valid case if we | 1018 // Special case: for a partition page with just a single slot, it may |
| 959 // arrive here. | 1019 // now be empty and we want to run it through the empty logic. |
| 960 ASSERT(page->numAllocatedSlots < 0); | 1020 if (UNLIKELY(page->numAllocatedSlots == 0)) |
| 961 // A transition of numAllocatedSlots from 0 to -1 is not legal, and | 1021 partitionFreeSlowPath(page); |
| 962 // likely indicates a double-free. | 1022 } |
| 963 RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(page->numAllocatedSlots != -1); | 1023 } |
| 964 page->numAllocatedSlots = -page->numAllocatedSlots - 2; | 1024 |
| 965 ASSERT(page->numAllocatedSlots == partitionBucketSlots(bucket) - 1); | 1025 bool partitionReallocDirectMappedInPlace(PartitionRootGeneric* root, |
| 966 // Fully used page became partially used. It must be put back on the | 1026 PartitionPage* page, |
| 967 // non-full page list. Also make it the current page to increase the | 1027 size_t rawSize) { |
| 968 // chances of it being filled up again. The old current page will be | 1028 ASSERT(partitionBucketIsDirectMapped(page->bucket)); |
| 969 // the next page. | 1029 |
| 970 ASSERT(!page->nextPage); | 1030 rawSize = partitionCookieSizeAdjustAdd(rawSize); |
| 971 if (LIKELY(bucket->activePagesHead != &PartitionRootGeneric::gSeedPage)) | 1031 |
| 972 page->nextPage = bucket->activePagesHead; | 1032 // Note that the new size might be a bucketed size; this function is called |
| 973 bucket->activePagesHead = page; | 1033 // whenever we're reallocating a direct mapped allocation. |
| 974 --bucket->numFullPages; | 1034 size_t newSize = partitionDirectMapSize(rawSize); |
| 975 // Special case: for a partition page with just a single slot, it may | 1035 if (newSize < kGenericMinDirectMappedDownsize) |
| 976 // now be empty and we want to run it through the empty logic. | 1036 return false; |
| 977 if (UNLIKELY(page->numAllocatedSlots == 0)) | 1037 |
| 978 partitionFreeSlowPath(page); | 1038 // bucket->slotSize is the current size of the allocation. |
| 979 } | 1039 size_t currentSize = page->bucket->slotSize; |
| 980 } | 1040 if (newSize == currentSize) |
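The sign trick used by partitionFreeSlowPath can be exercised on its own. A sketch with a stand-in struct: the hot free path decrements before the slow path runs, which is why the untag is `-n - 2` and why `-1` signals a probable double-free:

```cpp
#include <cassert>

struct Slots { int numAllocatedSlots; };

// Tag a full span by negating its count (64 slots -> -64), as the active
// list sweep does.
void tagFull(Slots& s) { s.numAllocatedSlots = -s.numAllocatedSlots; }

// Mirror of the free path: decrement first, then undo the tag if present.
// 0 -> -1 after the decrement means "free on an already-empty span": a
// likely double-free, hence the tripwire.
int freeOneSlot(Slots& s) {
  --s.numAllocatedSlots;
  assert(s.numAllocatedSlots != -1);  // double-free tripwire
  if (s.numAllocatedSlots < 0)        // span was tagged full
    s.numAllocatedSlots = -s.numAllocatedSlots - 2;
  return s.numAllocatedSlots;
}

int main() {
  Slots s{64};
  tagFull(s);                    // -64
  assert(freeOneSlot(s) == 63);  // back to slots - 1
  return 0;
}
```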
| 981 | 1041 return true; |
| 982 bool partitionReallocDirectMappedInPlace(PartitionRootGeneric* root, PartitionPage* page, size_t rawSize) | 1025 bool partitionReallocDirectMappedInPlace(PartitionRootGeneric* root, |
| 983 { | 1043 char* charPtr = static_cast<char*>(partitionPageToPointer(page)); |
| 984 ASSERT(partitionBucketIsDirectMapped(page->bucket)); | 1044 |
| 985 | 1045 if (newSize < currentSize) { |
| 986 rawSize = partitionCookieSizeAdjustAdd(rawSize); | 1046 size_t mapSize = partitionPageToDirectMapExtent(page)->mapSize; |
| 987 | 1047 |
| 988 // Note that the new size might be a bucketed size; this function is called | 1048 // Don't reallocate in-place if new size is less than 80 % of the full |
| 989 // whenever we're reallocating a direct mapped allocation. | 1049 // map size, to avoid holding on to too much unused address space. |
| 990 size_t newSize = partitionDirectMapSize(rawSize); | 1050 if ((newSize / kSystemPageSize) * 5 < (mapSize / kSystemPageSize) * 4) |
| 991 if (newSize < kGenericMinDirectMappedDownsize) | 1051 return false; |
| 992 return false; | 1052 |
| 993 | 1053 // Shrink by decommitting unneeded pages and making them inaccessible. |
| 994 // bucket->slotSize is the current size of the allocation. | 1054 size_t decommitSize = currentSize - newSize; |
| 995 size_t currentSize = page->bucket->slotSize; | 1055 partitionDecommitSystemPages(root, charPtr + newSize, decommitSize); |
| 996 if (newSize == currentSize) | 1056 setSystemPagesInaccessible(charPtr + newSize, decommitSize); |
| 997 return true; | 1057 } else if (newSize <= partitionPageToDirectMapExtent(page)->mapSize) { |
| 998 | 1058 // Grow within the actually allocated memory. Just need to make the |
| 999 char* charPtr = static_cast<char*>(partitionPageToPointer(page)); | 1059 // pages accessible again. |
| 1000 | 1060 size_t recommitSize = newSize - currentSize; |
| 1001 if (newSize < currentSize) { | 1061 bool ret = setSystemPagesAccessible(charPtr + currentSize, recommitSize); |
| 1002 size_t mapSize = partitionPageToDirectMapExtent(page)->mapSize; | 1062 RELEASE_ASSERT(ret); |
| 1003 | 1063 partitionRecommitSystemPages(root, charPtr + currentSize, recommitSize); |
| 1004 // Don't reallocate in-place if new size is less than 80 % of the full | |
| 1005 // map size, to avoid holding on to too much unused address space. | |
| 1006 if ((newSize / kSystemPageSize) * 5 < (mapSize / kSystemPageSize) * 4) | |
| 1007 return false; | |
| 1008 | |
| 1009 // Shrink by decommitting unneeded pages and making them inaccessible. | |
| 1010 size_t decommitSize = currentSize - newSize; | |
| 1011 partitionDecommitSystemPages(root, charPtr + newSize, decommitSize); | |
| 1012 setSystemPagesInaccessible(charPtr + newSize, decommitSize); | |
| 1013 } else if (newSize <= partitionPageToDirectMapExtent(page)->mapSize) { | |
| 1014 // Grow within the actually allocated memory. Just need to make the | |
| 1015 // pages accessible again. | |
| 1016 size_t recommitSize = newSize - currentSize; | |
| 1017 bool ret = setSystemPagesAccessible(charPtr + currentSize, recommitSize); | 1061 bool ret = setSystemPagesAccessible(charPtr + currentSize, recommitSize); |
| 1018 RELEASE_ASSERT(ret); | |
| 1019 partitionRecommitSystemPages(root, charPtr + currentSize, recommitSize); | |
| 1020 | 1064 |
| 1021 #if ENABLE(ASSERT) | 1065 #if ENABLE(ASSERT) |
| 1022 memset(charPtr + currentSize, kUninitializedByte, recommitSize); | 1066 memset(charPtr + currentSize, kUninitializedByte, recommitSize); |
| 1023 #endif | 1067 #endif |
| 1024 } else { | 1068 } else { |
| 1025 // We can't perform the realloc in-place. | 1069 // We can't perform the realloc in-place. |
| 1026 // TODO: support this too when possible. | 1070 // TODO: support this too when possible. |
| 1027 return false; | 1071 return false; |
| 1028 } | 1072 } |
| 1029 | 1073 |
| 1030 #if ENABLE(ASSERT) | 1074 #if ENABLE(ASSERT) |
| 1031 // Write a new trailing cookie. | 1075 // Write a new trailing cookie. |
| 1032 partitionCookieWriteValue(charPtr + rawSize - kCookieSize); | 1076 partitionCookieWriteValue(charPtr + rawSize - kCookieSize); |
| 1033 #endif | 1077 #endif |
| 1034 | 1078 |
| 1035 partitionPageSetRawSize(page, rawSize); | 1079 partitionPageSetRawSize(page, rawSize); |
| 1036 ASSERT(partitionPageGetRawSize(page) == rawSize); | 1080 ASSERT(partitionPageGetRawSize(page) == rawSize); |
| 1037 | 1081 |
| 1038 page->bucket->slotSize = newSize; | 1082 page->bucket->slotSize = newSize; |
| 1039 return true; | 1083 return true; |
| 1040 } | 1084 } |
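The shrink branch's integer test above is worth seeing with numbers: it keeps the mapping in place only when the new size is at least 80% of the map size (the page size here is a stand-in):

```cpp
#include <cassert>
#include <cstddef>

constexpr size_t kPage = 4096;  // stand-in system page size

bool worthKeepingInPlace(size_t newSize, size_t mapSize) {
  // Equivalent to the code above: if newSize/page < 0.8 * mapSize/page,
  // give the mapping back instead of holding unused address space.
  return (newSize / kPage) * 5 >= (mapSize / kPage) * 4;
}

int main() {
  assert(worthKeepingInPlace(9 * kPage, 10 * kPage));   // 90%: shrink in place
  assert(!worthKeepingInPlace(7 * kPage, 10 * kPage));  // 70%: reallocate
  return 0;
}
```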
| 1041 | 1085 |
| 1042 void* partitionReallocGeneric(PartitionRootGeneric* root, void* ptr, size_t newSize, const char* typeName) | 1086 void* partitionReallocGeneric(PartitionRootGeneric* root, |
| 1043 { | 1087 void* ptr, |
| 1088 size_t newSize, |
| 1089 const char* typeName) { |
| 1044 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) | 1090 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) |
| 1045 return realloc(ptr, newSize); | 1091 return realloc(ptr, newSize); |
| 1046 #else | 1092 #else |
| 1047 if (UNLIKELY(!ptr)) | 1093 if (UNLIKELY(!ptr)) |
| 1048 return partitionAllocGeneric(root, newSize, typeName); | 1094 return partitionAllocGeneric(root, newSize, typeName); |
| 1049 if (UNLIKELY(!newSize)) { | 1095 if (UNLIKELY(!newSize)) { |
| 1050 partitionFreeGeneric(root, ptr); | |
| 1051 return 0; | |
| 1052 } | |
| 1053 | |
| 1054 if (newSize > kGenericMaxDirectMapped) | |
| 1055 partitionExcessiveAllocationSize(); | |
| 1056 | |
| 1057 ASSERT(partitionPointerIsValid(partitionCookieFreePointerAdjust(ptr))); | |
| 1058 | |
| 1059 PartitionPage* page = partitionPointerToPage(partitionCookieFreePointerAdjust(ptr)); | |
| 1060 | |
| 1061 if (UNLIKELY(partitionBucketIsDirectMapped(page->bucket))) { | |
| 1062 // We may be able to perform the realloc in place by changing the | |
| 1063 // accessibility of memory pages and, if reducing the size, decommitting | |
| 1064 // them. | |
| 1065 if (partitionReallocDirectMappedInPlace(root, page, newSize)) { | |
| 1066 PartitionAllocHooks::reallocHookIfEnabled(ptr, ptr, newSize, typeName); | |
| 1067 return ptr; | |
| 1068 } | |
| 1069 } | |
| 1070 | |
| 1071 size_t actualNewSize = partitionAllocActualSize(root, newSize); | |
| 1072 size_t actualOldSize = partitionAllocGetSize(ptr); | |
| 1073 | |
| 1074 // TODO: note that tcmalloc will "ignore" a downsizing realloc() unless the | |
| 1075 // new size is a significant percentage smaller. We could do the same if we | |
| 1076 // determine it is a win. | |
| 1077 if (actualNewSize == actualOldSize) { | |
| 1078 // Trying to allocate a block of size newSize would give us a block of | |
| 1079 // the same size as the one we've already got, so no point in doing | |
| 1080 // anything here. | |
| 1081 return ptr; | |
| 1082 } | |
| 1083 | |
| 1084 // This realloc cannot be resized in-place. Sadness. | |
| 1085 void* ret = partitionAllocGeneric(root, newSize, typeName); | |
| 1086 size_t copySize = actualOldSize; | |
| 1087 if (newSize < copySize) | |
| 1088 copySize = newSize; | |
| 1089 | |
| 1090 memcpy(ret, ptr, copySize); | |
| 1091 partitionFreeGeneric(root, ptr); | 1096 partitionFreeGeneric(root, ptr); |
| 1092 return ret; | 1097 return 0; |
| 1098 } |
| 1099 |
| 1100 if (newSize > kGenericMaxDirectMapped) |
| 1101 partitionExcessiveAllocationSize(); |
| 1102 |
| 1103 ASSERT(partitionPointerIsValid(partitionCookieFreePointerAdjust(ptr))); |
| 1104 |
| 1105 PartitionPage* page = |
| 1106 partitionPointerToPage(partitionCookieFreePointerAdjust(ptr)); |
| 1107 |
| 1108 if (UNLIKELY(partitionBucketIsDirectMapped(page->bucket))) { |
| 1109 // We may be able to perform the realloc in place by changing the |
| 1110 // accessibility of memory pages and, if reducing the size, decommitting |
| 1111 // them. |
| 1112 if (partitionReallocDirectMappedInPlace(root, page, newSize)) { |
| 1113 PartitionAllocHooks::reallocHookIfEnabled(ptr, ptr, newSize, typeName); |
| 1114 return ptr; |
| 1115 } |
| 1116 } |
| 1117 |
| 1118 size_t actualNewSize = partitionAllocActualSize(root, newSize); |
| 1119 size_t actualOldSize = partitionAllocGetSize(ptr); |
| 1120 |
| 1121 // TODO: note that tcmalloc will "ignore" a downsizing realloc() unless the |
| 1122 // new size is a significant percentage smaller. We could do the same if we |
| 1123 // determine it is a win. |
| 1124 if (actualNewSize == actualOldSize) { |
| 1125 // Trying to allocate a block of size newSize would give us a block of |
| 1126 // the same size as the one we've already got, so no point in doing |
| 1127 // anything here. |
| 1128 return ptr; |
| 1129 } |
| 1130 |
| 1131 // This realloc cannot be resized in-place. Sadness. |
| 1132 void* ret = partitionAllocGeneric(root, newSize, typeName); |
| 1133 size_t copySize = actualOldSize; |
| 1134 if (newSize < copySize) |
| 1135 copySize = newSize; |
| 1136 |
| 1137 memcpy(ret, ptr, copySize); |
| 1138 partitionFreeGeneric(root, ptr); |
| 1139 return ret; |
| 1093 #endif | 1140 #endif |
| 1094 } | 1141 } |
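When neither the in-place path nor the same-actual-size shortcut applies, partitionReallocGeneric falls back to allocate-copy-free, copying the smaller of the two sizes. The same pattern with the standard allocator standing in for the partition calls:

```cpp
#include <algorithm>
#include <cstdlib>
#include <cstring>

// The fallback move in miniature: allocate, copy min(old, new), release the
// old block. malloc/free are stand-ins for partitionAllocGeneric and
// partitionFreeGeneric.
void* reallocByMove(void* old, size_t oldSize, size_t newSize) {
  void* fresh = std::malloc(newSize);
  if (!fresh)
    return nullptr;
  std::memcpy(fresh, old, std::min(oldSize, newSize));
  std::free(old);
  return fresh;
}
```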
| 1095 | 1142 |
| 1096 static size_t partitionPurgePage(PartitionPage* page, bool discard) | 1143 static size_t partitionPurgePage(PartitionPage* page, bool discard) { |
| 1097 { | 1144 const PartitionBucket* bucket = page->bucket; |
| 1098 const PartitionBucket* bucket = page->bucket; | 1145 size_t slotSize = bucket->slotSize; |
| 1099 size_t slotSize = bucket->slotSize; | 1146 if (slotSize < kSystemPageSize || !page->numAllocatedSlots) |
| 1100 if (slotSize < kSystemPageSize || !page->numAllocatedSlots) | 1147 return 0; |
| 1101 return 0; | 1148 |
| 1102 | 1149 size_t bucketNumSlots = partitionBucketSlots(bucket); |
| 1103 size_t bucketNumSlots = partitionBucketSlots(bucket); | 1150 size_t discardableBytes = 0; |
| 1104 size_t discardableBytes = 0; | 1151 |
| 1105 | 1152 size_t rawSize = partitionPageGetRawSize(const_cast<PartitionPage*>(page)); |
| 1106 size_t rawSize = partitionPageGetRawSize(const_cast<PartitionPage*>(page)); | 1153 if (rawSize) { |
| 1107 if (rawSize) { | 1154 uint32_t usedBytes = |
| 1108 uint32_t usedBytes = static_cast<uint32_t>(partitionRoundUpToSystemPage(rawSize)); | 1155 static_cast<uint32_t>(partitionRoundUpToSystemPage(rawSize)); |
| 1109 discardableBytes = bucket->slotSize - usedBytes; | 1156 discardableBytes = bucket->slotSize - usedBytes; |
| 1110 if (discardableBytes && discard) { | 1157 if (discardableBytes && discard) { |
| 1111 char* ptr = reinterpret_cast<char*>(partitionPageToPointer(page)); | 1158 char* ptr = reinterpret_cast<char*>(partitionPageToPointer(page)); |
| 1112 ptr += usedBytes; | 1159 ptr += usedBytes; |
| 1113 discardSystemPages(ptr, discardableBytes); | 1160 discardSystemPages(ptr, discardableBytes); |
| 1114 } | |
| 1115 return discardableBytes; | |
| 1116 } | |
| 1117 | |
| 1118 const size_t maxSlotCount = (kPartitionPageSize * kMaxPartitionPagesPerSlotSpan) / kSystemPageSize; | |
| 1119 ASSERT(bucketNumSlots <= maxSlotCount); | |
| 1120 ASSERT(page->numUnprovisionedSlots < bucketNumSlots); | |
| 1121 size_t numSlots = bucketNumSlots - page->numUnprovisionedSlots; | |
| 1122 char slotUsage[maxSlotCount]; | |
| 1123 size_t lastSlot = static_cast<size_t>(-1); | |
| 1124 memset(slotUsage, 1, numSlots); | |
| 1125 char* ptr = reinterpret_cast<char*>(partitionPageToPointer(page)); | |
| 1126 PartitionFreelistEntry* entry = page->freelistHead; | |
| 1127 // First, walk the freelist for this page and make a bitmap of which slots | |
| 1128 // are not in use. | |
| 1129 while (entry) { | |
| 1130 size_t slotIndex = (reinterpret_cast<char*>(entry) - ptr) / slotSize; | |
| 1131 ASSERT(slotIndex < numSlots); | |
| 1132 slotUsage[slotIndex] = 0; | |
| 1133 entry = partitionFreelistMask(entry->next); | |
| 1134 // If we have a slot where the masked freelist entry is 0, we can | |
| 1135 // actually discard that freelist entry because touching a discarded | |
| 1136 // page is guaranteed to return original content or 0. | |
| 1137 // (Note that this optimization won't fire on big endian machines | |
| 1138 // because the masking function is negation.) | |
| 1139 if (!partitionFreelistMask(entry)) | |
| 1140 lastSlot = slotIndex; | |
| 1141 } | |
| 1142 | |
| 1143 // If the slot(s) at the end of the slot span are not in use, we can | |
| 1144 // truncate them entirely and rewrite the freelist. | |
| 1145 size_t truncatedSlots = 0; | |
| 1146 while (!slotUsage[numSlots - 1]) { | |
| 1147 truncatedSlots++; | |
| 1148 numSlots--; | |
| 1149 ASSERT(numSlots); | |
| 1150 } | |
| 1151 // First, do the work of calculating the discardable bytes. Don't actually | |
| 1152 // discard anything unless the discard flag was passed in. | |
| 1153 char* beginPtr = nullptr; | |
| 1154 char* endPtr = nullptr; | |
| 1155 size_t unprovisionedBytes = 0; | |
| 1156 if (truncatedSlots) { | |
| 1157 beginPtr = ptr + (numSlots * slotSize); | |
| 1158 endPtr = beginPtr + (slotSize * truncatedSlots); | |
| 1159 beginPtr = reinterpret_cast<char*>(partitionRoundUpToSystemPage(reinterpret_cast<size_t>(beginPtr))); | |
| 1160 // We round the end pointer here up and not down because we're at the | |
| 1161 // end of a slot span, so we "own" all the way up the page boundary. | |
| 1162 endPtr = reinterpret_cast<char*>(partitionRoundUpToSystemPage(reinterpret_cast<size_t>(endPtr))); | |
| 1163 ASSERT(endPtr <= ptr + partitionBucketBytes(bucket)); | |
| 1164 if (beginPtr < endPtr) { | |
| 1165 unprovisionedBytes = endPtr - beginPtr; | |
| 1166 discardableBytes += unprovisionedBytes; | |
| 1167 } | |
| 1168 } | |
| 1169 if (unprovisionedBytes && discard) { | |
| 1170 ASSERT(truncatedSlots > 0); | |
| 1171 size_t numNewEntries = 0; | |
| 1172 page->numUnprovisionedSlots += static_cast<uint16_t>(truncatedSlots); | |
| 1173 // Rewrite the freelist. | |
| 1174 PartitionFreelistEntry** entryPtr = &page->freelistHead; | |
| 1175 for (size_t slotIndex = 0; slotIndex < numSlots; ++slotIndex) { | |
| 1176 if (slotUsage[slotIndex]) | |
| 1177 continue; | |
| 1178 PartitionFreelistEntry* entry = reinterpret_cast<PartitionFreelistEntry*>(ptr + (slotSize * slotIndex)); | |
| 1179 *entryPtr = partitionFreelistMask(entry); | |
| 1180 entryPtr = reinterpret_cast<PartitionFreelistEntry**>(entry); | |
| 1181 numNewEntries++; | |
| 1182 } | |
| 1183 // Terminate the freelist chain. | |
| 1184 *entryPtr = nullptr; | |
| 1185 // The freelist head is stored unmasked. | |
| 1186 page->freelistHead = partitionFreelistMask(page->freelistHead); | |
| 1187 ASSERT(numNewEntries == numSlots - page->numAllocatedSlots); | |
| 1188 // Discard the memory. | |
| 1189 discardSystemPages(beginPtr, unprovisionedBytes); | |
| 1190 } | |
| 1191 | |
| 1192 // Next, walk the slots and for any not in use, consider where the system | |
| 1193 // page boundaries occur. We can release any system pages back to the | |
| 1194 // system as long as we don't interfere with a freelist pointer or an | |
| 1195 // adjacent slot. | |
| 1196 for (size_t i = 0; i < numSlots; ++i) { | |
| 1197 if (slotUsage[i]) | |
| 1198 continue; | |
| 1199 // The first address we can safely discard is just after the freelist | |
| 1200 // pointer. There's one quirk: if the freelist pointer is actually a | |
| 1201 // null, we can discard that pointer value too. | |
| 1202 char* beginPtr = ptr + (i * slotSize); | |
| 1203 char* endPtr = beginPtr + slotSize; | |
| 1204 if (i != lastSlot) | |
| 1205 beginPtr += sizeof(PartitionFreelistEntry); | |
| 1206 beginPtr = reinterpret_cast<char*>(partitionRoundUpToSystemPage(reinterpret_cast<size_t>(beginPtr))); | |
| 1207 endPtr = reinterpret_cast<char*>(partitionRoundDownToSystemPage(reinterpret_cast<size_t>(endPtr))); | |
| 1208 if (beginPtr < endPtr) { | |
| 1209 size_t partialSlotBytes = endPtr - beginPtr; | |
| 1210 discardableBytes += partialSlotBytes; | |
| 1211 if (discard) | |
| 1212 discardSystemPages(beginPtr, partialSlotBytes); | |
| 1213 } | |
| 1214 } | 1161 } |
| 1215 return discardableBytes; | 1162 return discardableBytes; |
| 1216 } | 1163 } |
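The per-slot walk in partitionPurgePage only ever discards whole system pages that sit strictly inside an unused slot, starting past the slot's live freelist pointer. The boundary arithmetic in isolation (the page size is a stand-in):

```cpp
#include <cstddef>
#include <cstdint>

constexpr uintptr_t kPageSz = 4096;  // stand-in system page size

uintptr_t roundUp(uintptr_t p)   { return (p + kPageSz - 1) & ~(kPageSz - 1); }
uintptr_t roundDown(uintptr_t p) { return p & ~(kPageSz - 1); }

// Discardable bytes for one unused slot, assuming its first word still
// holds a live freelist pointer that must survive the discard.
size_t discardableInSlot(uintptr_t slotBegin, size_t slotSize) {
  uintptr_t begin = roundUp(slotBegin + sizeof(void*));  // skip freelist ptr
  uintptr_t end = roundDown(slotBegin + slotSize);
  return begin < end ? end - begin : 0;
}
```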
| 1217 | 1164 |
| 1218 static void partitionPurgeBucket(PartitionBucket* bucket) | 1165 const size_t maxSlotCount = |
| 1219 { | 1166 (kPartitionPageSize * kMaxPartitionPagesPerSlotSpan) / kSystemPageSize; |
| 1220 if (bucket->activePagesHead != &PartitionRootGeneric::gSeedPage) { | 1167 ASSERT(bucketNumSlots <= maxSlotCount); |
| 1221 for (PartitionPage* page = bucket->activePagesHead; page; page = page->nextPage) { | 1168 ASSERT(page->numUnprovisionedSlots < bucketNumSlots); |
| 1222 ASSERT(page != &PartitionRootGeneric::gSeedPage); | 1169 size_t numSlots = bucketNumSlots - page->numUnprovisionedSlots; |
| 1223 (void) partitionPurgePage(page, true); | 1170 char slotUsage[maxSlotCount]; |
| 1224 } | 1171 size_t lastSlot = static_cast<size_t>(-1); |
| 1225 } | 1172 memset(slotUsage, 1, numSlots); |
| 1226 } | 1173 char* ptr = reinterpret_cast<char*>(partitionPageToPointer(page)); |
| 1227 | 1174 PartitionFreelistEntry* entry = page->freelistHead; |
| 1228 void partitionPurgeMemory(PartitionRoot* root, int flags) | 1175 // First, walk the freelist for this page and make a bitmap of which slots |
| 1229 { | 1176 // are not in use. |
| 1230 if (flags & PartitionPurgeDecommitEmptyPages) | 1177 while (entry) { |
| 1231 partitionDecommitEmptyPages(root); | 1178 size_t slotIndex = (reinterpret_cast<char*>(entry) - ptr) / slotSize; |
| 1232 // We don't currently do anything for PartitionPurgeDiscardUnusedSystemPages | 1179 ASSERT(slotIndex < numSlots); |
| 1233 // here because that flag is only useful for allocations >= system page | 1180 slotUsage[slotIndex] = 0; |
| 1234 // size. We only have allocations that large inside generic partitions | 1181 entry = partitionFreelistMask(entry->next); |
| 1235 // at the moment. | 1182 // If we have a slot where the masked freelist entry is 0, we can |
| 1236 } | 1183 // actually discard that freelist entry because touching a discarded |
| 1237 | 1184 // page is guaranteed to return original content or 0. |
| 1238 void partitionPurgeMemoryGeneric(PartitionRootGeneric* root, int flags) | 1185 // (Note that this optimization won't fire on big endian machines |
| 1239 { | 1186 // because the masking function is negation.) |
| 1240 SpinLock::Guard guard(root->lock); | 1187 if (!partitionFreelistMask(entry)) |
| 1241 if (flags & PartitionPurgeDecommitEmptyPages) | 1188 lastSlot = slotIndex; |
| 1242 partitionDecommitEmptyPages(root); | 1189 } |
| 1243 if (flags & PartitionPurgeDiscardUnusedSystemPages) { | 1190 |
| 1244 for (size_t i = 0; i < kGenericNumBuckets; ++i) { | 1191 // If the slot(s) at the end of the slot span are not in use, we can |
| 1245 PartitionBucket* bucket = &root->buckets[i]; | 1192 // truncate them entirely and rewrite the freelist. |
| 1246 if (bucket->slotSize >= kSystemPageSize) | 1193 size_t truncatedSlots = 0; |
| 1247 partitionPurgeBucket(bucket); | 1194 while (!slotUsage[numSlots - 1]) { |
| 1248 } | 1195 truncatedSlots++; |
| 1249 } | 1196 numSlots--; |
| 1250 } | 1197 ASSERT(numSlots); |
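A caller combines the two purge flags with bitwise OR. A minimal usage sketch — it assumes an initialized PartitionRootGeneric and that the flags and function live in the WTF namespace of this file's header:

```cpp
#include "wtf/PartitionAlloc.h"

// Decommit cached empty spans and discard unused system pages in one call.
void releaseFreeMemory(WTF::PartitionRootGeneric* root) {
  WTF::partitionPurgeMemoryGeneric(
      root, WTF::PartitionPurgeDecommitEmptyPages |
                WTF::PartitionPurgeDiscardUnusedSystemPages);
}
```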
| 1251 | 1198 } |
| 1252 static void partitionDumpPageStats(PartitionBucketMemoryStats* statsOut, const PartitionPage* page) | 1199 // First, do the work of calculating the discardable bytes. Don't actually |
| 1253 { | 1200 // discard anything unless the discard flag was passed in. |
| 1254 uint16_t bucketNumSlots = partitionBucketSlots(page->bucket); | 1201 char* beginPtr = nullptr; |
| 1255 | 1202 char* endPtr = nullptr; |
| 1256 if (partitionPageStateIsDecommitted(page)) { | 1203 size_t unprovisionedBytes = 0; |
| 1257 ++statsOut->numDecommittedPages; | 1204 if (truncatedSlots) { |
| 1258 return; | 1205 beginPtr = ptr + (numSlots * slotSize); |
| 1259 } | 1206 endPtr = beginPtr + (slotSize * truncatedSlots); |
| 1260 | 1207 beginPtr = reinterpret_cast<char*>( |
| 1261 statsOut->discardableBytes += partitionPurgePage(const_cast<PartitionPage*>(page), false); | 1208 partitionRoundUpToSystemPage(reinterpret_cast<size_t>(beginPtr))); |
| 1262 | 1209 // We round the end pointer here up and not down because we're at the |
| 1263 size_t rawSize = partitionPageGetRawSize(const_cast<PartitionPage*>(page)); | 1210 // end of a slot span, so we "own" all the way up the page boundary. |
| 1264 if (rawSize) | 1211 endPtr = reinterpret_cast<char*>( |
| 1265 statsOut->activeBytes += static_cast<uint32_t>(rawSize); | 1212 partitionRoundUpToSystemPage(reinterpret_cast<size_t>(endPtr))); |
| 1266 else | 1213 ASSERT(endPtr <= ptr + partitionBucketBytes(bucket)); |
| 1267 statsOut->activeBytes += (page->numAllocatedSlots * statsOut->bucketSlotSize); | 1214 if (beginPtr < endPtr) { |
| 1268 | 1215 unprovisionedBytes = endPtr - beginPtr; |
| 1269 size_t pageBytesResident = partitionRoundUpToSystemPage((bucketNumSlots - page->numUnprovisionedSlots) * statsOut->bucketSlotSize); | 1216 discardableBytes += unprovisionedBytes; |
| 1270 statsOut->residentBytes += pageBytesResident; | 1217 } |
| 1271 if (partitionPageStateIsEmpty(page)) { | 1218 } |
| 1272 statsOut->decommittableBytes += pageBytesResident; | 1219 if (unprovisionedBytes && discard) { |
| 1273 ++statsOut->numEmptyPages; | 1220 ASSERT(truncatedSlots > 0); |
| 1274 } else if (partitionPageStateIsFull(page)) { | 1221 size_t numNewEntries = 0; |
| 1275 ++statsOut->numFullPages; | 1222 page->numUnprovisionedSlots += static_cast<uint16_t>(truncatedSlots); |
| 1276 } else { | 1223 // Rewrite the freelist. |
| 1277 ASSERT(partitionPageStateIsActive(page)); | 1224 PartitionFreelistEntry** entryPtr = &page->freelistHead; |
| 1278 ++statsOut->numActivePages; | 1225 for (size_t slotIndex = 0; slotIndex < numSlots; ++slotIndex) { |
| 1279 } | 1226 if (slotUsage[slotIndex]) |
| 1280 } | 1227 continue; |
| 1281 | 1228 PartitionFreelistEntry* entry = reinterpret_cast<PartitionFreelistEntry*>( |
| 1282 static void partitionDumpBucketStats(PartitionBucketMemoryStats* statsOut, const PartitionBucket* bucket) | 1229 ptr + (slotSize * slotIndex)); |
| 1283 { | 1230 *entryPtr = partitionFreelistMask(entry); |
| 1284 ASSERT(!partitionBucketIsDirectMapped(bucket)); | 1231 entryPtr = reinterpret_cast<PartitionFreelistEntry**>(entry); |
| 1285 statsOut->isValid = false; | 1232 numNewEntries++; |
| 1286 // If the active page list is empty (== &PartitionRootGeneric::gSeedPage), | 1233 } |
| 1287 // the bucket might still need to be reported if it has a list of empty, | 1234 // Terminate the freelist chain. |
| 1288 // decommitted or full pages. | 1235 *entryPtr = nullptr; |
| 1289 if (bucket->activePagesHead == &PartitionRootGeneric::gSeedPage && !bucket->emptyPagesHead && !bucket->decommittedPagesHead && !bucket->numFullPages) | 1236 // The freelist head is stored unmasked. |
| 1290 return; | 1237 page->freelistHead = partitionFreelistMask(page->freelistHead); |
| 1291 | 1238 ASSERT(numNewEntries == numSlots - page->numAllocatedSlots); |
| 1292 memset(statsOut, '\0', sizeof(*statsOut)); | 1239 // Discard the memory. |
| 1293 statsOut->isValid = true; | 1240 discardSystemPages(beginPtr, unprovisionedBytes); |
| 1294 statsOut->isDirectMap = false; | 1241 } |
| 1295 statsOut->numFullPages = static_cast<size_t>(bucket->numFullPages); | 1242 |
| 1296 statsOut->bucketSlotSize = bucket->slotSize; | 1243 // Next, walk the slots and for any not in use, consider where the system |
| 1297 uint16_t bucketNumSlots = partitionBucketSlots(bucket); | 1244 // page boundaries occur. We can release any system pages back to the |
| 1298 size_t bucketUsefulStorage = statsOut->bucketSlotSize * bucketNumSlots; | 1245 // system as long as we don't interfere with a freelist pointer or an |
| 1299 statsOut->allocatedPageSize = partitionBucketBytes(bucket); | 1246 // adjacent slot. |
| 1300 statsOut->activeBytes = bucket->numFullPages * bucketUsefulStorage; | 1247 for (size_t i = 0; i < numSlots; ++i) { |
| 1301 statsOut->residentBytes = bucket->numFullPages * statsOut->allocatedPageSize; | 1248 if (slotUsage[i]) |
| 1302 | 1249 continue; |
| 1303 for (const PartitionPage* page = bucket->emptyPagesHead; page; page = page->nextPage) { | 1250 // The first address we can safely discard is just after the freelist |
| 1304 ASSERT(partitionPageStateIsEmpty(page) || partitionPageStateIsDecommitted(page)); | 1251 // pointer. There's one quirk: if the freelist pointer is actually a |
| 1305 partitionDumpPageStats(statsOut, page); | 1252 // null, we can discard that pointer value too. |
| 1306 } | 1253 char* beginPtr = ptr + (i * slotSize); |
| 1307 for (const PartitionPage* page = bucket->decommittedPagesHead; page; page = page->nextPage) { | 1254 char* endPtr = beginPtr + slotSize; |
| 1308 ASSERT(partitionPageStateIsDecommitted(page)); | 1255 if (i != lastSlot) |
| 1309 partitionDumpPageStats(statsOut, page); | 1256 beginPtr += sizeof(PartitionFreelistEntry); |
| 1310 } | 1257 beginPtr = reinterpret_cast<char*>( |
| 1311 | 1258 partitionRoundUpToSystemPage(reinterpret_cast<size_t>(beginPtr))); |
| 1312 if (bucket->activePagesHead != &PartitionRootGeneric::gSeedPage) { | 1259 endPtr = reinterpret_cast<char*>( |
| 1313 for (const PartitionPage* page = bucket->activePagesHead; page; page = page->nextPage) { | 1260 partitionRoundDownToSystemPage(reinterpret_cast<size_t>(endPtr))); |
| 1314 ASSERT(page != &PartitionRootGeneric::gSeedPage); | 1261 if (beginPtr < endPtr) { |
| 1315 partitionDumpPageStats(statsOut, page); | 1262 size_t partialSlotBytes = endPtr - beginPtr; |
| 1316 } | 1263 discardableBytes += partialSlotBytes; |
| 1317 } | 1264 if (discard) |
| 1318 } | 1265 discardSystemPages(beginPtr, partialSlotBytes); |
| 1319 | 1266 } |
| 1320 void partitionDumpStatsGeneric(PartitionRootGeneric* partition, const char* partitionName, bool isLightDump, PartitionStatsDumper* partitionStatsDumper) | 1267 } |
| 1321 { | 1268 return discardableBytes; |
| 1322 PartitionBucketMemoryStats bucketStats[kGenericNumBuckets]; | 1269 } |
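The purge walk that ends here reclaims memory inside unused slots while steering clear of the freelist pointer stored at each slot head and of the adjacent slots: only the range from the first system-page boundary after that pointer to the last boundary before the slot's end may be discarded. A minimal standalone sketch of that boundary computation, assuming 4 KiB system pages and hypothetical helper names (not part of this CL):

    #include <cassert>
    #include <cstddef>

    // Stand-ins for partitionRoundUpToSystemPage / partitionRoundDownToSystemPage,
    // assuming a 4 KiB system page; illustrative only.
    static const size_t kSysPage = 4096;
    static size_t roundUp(size_t x) { return (x + kSysPage - 1) & ~(kSysPage - 1); }
    static size_t roundDown(size_t x) { return x & ~(kSysPage - 1); }

    // How many bytes of an unused slot [slotBegin, slotBegin + slotSize) could
    // be discarded without touching its embedded freelist pointer (simplified
    // here to sizeof(void*)) or spilling into a neighboring slot.
    static size_t discardableBytesInSlot(size_t slotBegin, size_t slotSize,
                                         bool isLastSlot) {
        size_t begin = slotBegin;
        size_t end = slotBegin + slotSize;
        if (!isLastSlot)
            begin += sizeof(void*);  // keep the freelist pointer resident
        begin = roundUp(begin);
        end = roundDown(end);
        return begin < end ? end - begin : 0;
    }

    int main() {
        // A 12 KiB slot starting mid-page: the page holding the freelist
        // pointer and the partial page shared with the next slot stay resident.
        assert(discardableBytesInSlot(0x10800, 12288, false) == 8192);
        return 0;
    }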
| 1323 static const size_t kMaxReportableDirectMaps = 4096; | 1270 |
| 1324 uint32_t directMapLengths[kMaxReportableDirectMaps]; | 1271 static void partitionPurgeBucket(PartitionBucket* bucket) { |
| 1325 size_t numDirectMappedAllocations = 0; | 1272 if (bucket->activePagesHead != &PartitionRootGeneric::gSeedPage) { |
| 1326 | 1273 for (PartitionPage* page = bucket->activePagesHead; page; |
| 1327 { | 1274 page = page->nextPage) { |
| 1328 SpinLock::Guard guard(partition->lock); | 1275 ASSERT(page != &PartitionRootGeneric::gSeedPage); |
| 1329 | 1276 (void)partitionPurgePage(page, true); |
| 1330 for (size_t i = 0; i < kGenericNumBuckets; ++i) { | 1277 } |
| 1331 const PartitionBucket* bucket = &partition->buckets[i]; | 1278 } |
| 1332 // Don't report the pseudo buckets that the generic allocator sets up in | 1279 } |
| 1333 // order to preserve a fast size->bucket map (see | 1280 |
| 1334 // partitionAllocGenericInit for details). | 1281 void partitionPurgeMemory(PartitionRoot* root, int flags) { |
| 1335 if (!bucket->activePagesHead) | 1282 if (flags & PartitionPurgeDecommitEmptyPages) |
| 1336 bucketStats[i].isValid = false; | 1283 partitionDecommitEmptyPages(root); |
| 1337 else | 1284 // We don't currently do anything for PartitionPurgeDiscardUnusedSystemPages |
| 1338 partitionDumpBucketStats(&bucketStats[i], bucket); | 1285 // here because that flag is only useful for allocations >= system page |
| 1339 } | 1286 // size. We only have allocations that large inside generic partitions |
| 1340 | 1287 // at the moment. |
| 1341 for (PartitionDirectMapExtent* extent = partition->directMapList; extent; extent = extent->nextExtent) { | 1288 } |
| 1342 ASSERT(!extent->nextExtent || extent->nextExtent->prevExtent == extent); | 1289 |
| 1343 directMapLengths[numDirectMappedAllocations] = extent->bucket->slotSize; | 1290 void partitionPurgeMemoryGeneric(PartitionRootGeneric* root, int flags) { |
| 1344 ++numDirectMappedAllocations; | 1291 SpinLock::Guard guard(root->lock); |
| 1345 if (numDirectMappedAllocations == kMaxReportableDirectMaps) | 1292 if (flags & PartitionPurgeDecommitEmptyPages) |
| 1346 break; | 1293 partitionDecommitEmptyPages(root); |
| 1347 } | 1294 if (flags & PartitionPurgeDiscardUnusedSystemPages) { |
| 1348 } | |
| 1349 | |
| 1350 // partitionsDumpBucketStats is called after collecting stats because it | |
| 1351 // can try to allocate using PartitionAllocGeneric and it can't obtain the | |
| 1352 // lock. | |
| 1353 PartitionMemoryStats partitionStats = { 0 }; | |
| 1354 partitionStats.totalMmappedBytes = partition->totalSizeOfSuperPages + partition->totalSizeOfDirectMappedPages; | |
| 1355 partitionStats.totalCommittedBytes = partition->totalSizeOfCommittedPages; | |
| 1356 for (size_t i = 0; i < kGenericNumBuckets; ++i) { | 1295 for (size_t i = 0; i < kGenericNumBuckets; ++i) { |
| 1357 if (bucketStats[i].isValid) { | 1296 PartitionBucket* bucket = &root->buckets[i]; |
| 1358 partitionStats.totalResidentBytes += bucketStats[i].residentBytes; | 1297 if (bucket->slotSize >= kSystemPageSize) |
| 1359 partitionStats.totalActiveBytes += bucketStats[i].activeBytes; | 1298 partitionPurgeBucket(bucket); |
| 1360 partitionStats.totalDecommittableBytes += bucketStats[i].decommittableBytes; | 1299 } |
| 1361 partitionStats.totalDiscardableBytes += bucketStats[i].discardableBytes; | 1300 } |
| 1362 if (!isLightDump) | 1301 } |
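For context, a caller-side sketch of the purge entry point that closes here; myRoot is a hypothetical, already-initialized PartitionRootGeneric:

    // Decommit empty pages and discard unused system pages in one pass. The
    // flag constants are the ones tested inside partitionPurgeMemoryGeneric;
    // myRoot is illustrative only.
    WTF::partitionPurgeMemoryGeneric(
        &myRoot, WTF::PartitionPurgeDecommitEmptyPages |
                     WTF::PartitionPurgeDiscardUnusedSystemPages);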
| 1363 partitionStatsDumper->partitionsDumpBucketStats(partitionName, &bucketStats[i]); | 1302 |
| 1364 } | 1303 static void partitionDumpPageStats(PartitionBucketMemoryStats* statsOut, |
| 1365 } | 1304 const PartitionPage* page) { |
| 1366 | 1305 uint16_t bucketNumSlots = partitionBucketSlots(page->bucket); |
| 1367 size_t directMappedAllocationsTotalSize = 0; | 1306 |
| 1368 for (size_t i = 0; i < numDirectMappedAllocations; ++i) { | 1307 if (partitionPageStateIsDecommitted(page)) { |
| 1369 PartitionBucketMemoryStats stats; | 1308 ++statsOut->numDecommittedPages; |
| 1370 memset(&stats, '\0', sizeof(stats)); | 1309 return; |
| 1371 stats.isValid = true; | 1310 } |
| 1372 stats.isDirectMap = true; | 1311 |
| 1373 stats.numFullPages = 1; | 1312 statsOut->discardableBytes += |
| 1374 uint32_t size = directMapLengths[i]; | 1313 partitionPurgePage(const_cast<PartitionPage*>(page), false); |
| 1375 stats.allocatedPageSize = size; | 1314 |
| 1376 stats.bucketSlotSize = size; | 1315 size_t rawSize = partitionPageGetRawSize(const_cast<PartitionPage*>(page)); |
| 1377 stats.activeBytes = size; | 1316 if (rawSize) |
| 1378 stats.residentBytes = size; | 1317 statsOut->activeBytes += static_cast<uint32_t>(rawSize); |
| 1379 directMappedAllocationsTotalSize += size; | 1318 else |
| 1380 partitionStatsDumper->partitionsDumpBucketStats(partitionName, &stats); | 1319 statsOut->activeBytes += |
| 1381 } | 1320 (page->numAllocatedSlots * statsOut->bucketSlotSize); |
| 1382 partitionStats.totalResidentBytes += directMappedAllocationsTotalSize; | 1321 |
| 1383 partitionStats.totalActiveBytes += directMappedAllocationsTotalSize; | 1322 size_t pageBytesResident = partitionRoundUpToSystemPage( |
| 1384 partitionStatsDumper->partitionDumpTotals(partitionName, &partitionStats); | 1323 (bucketNumSlots - page->numUnprovisionedSlots) * |
| 1385 } | 1324 statsOut->bucketSlotSize); |
| 1386 | 1325 statsOut->residentBytes += pageBytesResident; |
| 1387 void partitionDumpStats(PartitionRoot* partition, const char* partitionName, bool isLightDump, PartitionStatsDumper* partitionStatsDumper) | 1326 if (partitionPageStateIsEmpty(page)) { |
| 1388 { | 1327 statsOut->decommittableBytes += pageBytesResident; |
| 1389 static const size_t kMaxReportableBuckets = 4096 / sizeof(void*); | 1328 ++statsOut->numEmptyPages; |
| 1390 PartitionBucketMemoryStats memoryStats[kMaxReportableBuckets]; | 1329 } else if (partitionPageStateIsFull(page)) { |
| 1391 const size_t partitionNumBuckets = partition->numBuckets; | 1330 ++statsOut->numFullPages; |
| 1392 ASSERT(partitionNumBuckets <= kMaxReportableBuckets); | 1331 } else { |
| 1393 | 1332 ASSERT(partitionPageStateIsActive(page)); |
| 1394 for (size_t i = 0; i < partitionNumBuckets; ++i) | 1333 ++statsOut->numActivePages; |
| 1395 partitionDumpBucketStats(&memoryStats[i], &partition->buckets()[i]); | 1334 } |
| 1396 | 1335 } |
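partitionDumpPageStats, ending here, keeps the two recoverable quantities distinct: an empty page's whole resident footprint counts as decommittable, while a page with live slots only contributes the discardable bytes measured by the dry-run purge (partitionPurgePage with discard == false). A worked example with assumed numbers, not taken from this CL:

    // Assume 512-byte slots, 16 slots per page, 4 KiB system pages, and no
    // unprovisioned slots:
    //   residentBytes += roundUpToSystemPage(16 * 512) = 8192
    // Empty page: all 8192 resident bytes are reported as decommittable.
    // Page with 9 live slots: activeBytes += 9 * 512 = 4608, and only whole
    // untouched system pages found by the dry-run purge count as discardable.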
| 1397 // partitionsDumpBucketStats is called after collecting stats because it | 1336 |
| 1398 // can use PartitionAlloc to allocate and this can affect the statistics. | 1337 static void partitionDumpBucketStats(PartitionBucketMemoryStats* statsOut, |
| 1399 PartitionMemoryStats partitionStats = { 0 }; | 1338 const PartitionBucket* bucket) { |
| 1400 partitionStats.totalMmappedBytes = partition->totalSizeOfSuperPages; | 1339 ASSERT(!partitionBucketIsDirectMapped(bucket)); |
| 1401 partitionStats.totalCommittedBytes = partition->totalSizeOfCommittedPages; | 1340 statsOut->isValid = false; |
| 1402 ASSERT(!partition->totalSizeOfDirectMappedPages); | 1341 // If the active page list is empty (== &PartitionRootGeneric::gSeedPage), |
| 1403 for (size_t i = 0; i < partitionNumBuckets; ++i) { | 1342 // the bucket might still need to be reported if it has a list of empty, |
| 1404 if (memoryStats[i].isValid) { | 1343 // decommitted or full pages. |
| 1405 partitionStats.totalResidentBytes += memoryStats[i].residentBytes; | 1344 if (bucket->activePagesHead == &PartitionRootGeneric::gSeedPage && |
| 1406 partitionStats.totalActiveBytes += memoryStats[i].activeBytes; | 1345 !bucket->emptyPagesHead && !bucket->decommittedPagesHead && |
| 1407 partitionStats.totalDecommittableBytes += memoryStats[i].decommittableBytes; | 1347 return; |
| 1408 partitionStats.totalDiscardableBytes += memoryStats[i].discardableBytes; | 1348 |
| 1409 if (!isLightDump) | 1348 |
| 1410 partitionStatsDumper->partitionsDumpBucketStats(partitionName, &memoryStats[i]); | 1349 memset(statsOut, '\0', sizeof(*statsOut)); |
| 1411 } | 1350 statsOut->isValid = true; |
| 1412 } | 1351 statsOut->isDirectMap = false; |
| 1413 partitionStatsDumper->partitionDumpTotals(partitionName, &partitionStats); | 1352 statsOut->numFullPages = static_cast<size_t>(bucket->numFullPages); |
| 1414 } | 1353 statsOut->bucketSlotSize = bucket->slotSize; |
| 1415 | 1354 uint16_t bucketNumSlots = partitionBucketSlots(bucket); |
| 1416 } // namespace WTF | 1355 size_t bucketUsefulStorage = statsOut->bucketSlotSize * bucketNumSlots; |
| 1417 | 1356 statsOut->allocatedPageSize = partitionBucketBytes(bucket); |
| 1357 statsOut->activeBytes = bucket->numFullPages * bucketUsefulStorage; |
| 1358 statsOut->residentBytes = bucket->numFullPages * statsOut->allocatedPageSize; |
| 1359 |
| 1360 for (const PartitionPage* page = bucket->emptyPagesHead; page; |
| 1361 page = page->nextPage) { |
| 1362 ASSERT(partitionPageStateIsEmpty(page) || |
| 1363 partitionPageStateIsDecommitted(page)); |
| 1364 partitionDumpPageStats(statsOut, page); |
| 1365 } |
| 1366 for (const PartitionPage* page = bucket->decommittedPagesHead; page; |
| 1367 page = page->nextPage) { |
| 1368 ASSERT(partitionPageStateIsDecommitted(page)); |
| 1369 partitionDumpPageStats(statsOut, page); |
| 1370 } |
| 1371 |
| 1372 if (bucket->activePagesHead != &PartitionRootGeneric::gSeedPage) { |
| 1373 for (const PartitionPage* page = bucket->activePagesHead; page; |
| 1374 page = page->nextPage) { |
| 1375 ASSERT(page != &PartitionRootGeneric::gSeedPage); |
| 1376 partitionDumpPageStats(statsOut, page); |
| 1377 } |
| 1378 } |
| 1379 } |
| 1380 |
| 1381 void partitionDumpStatsGeneric(PartitionRootGeneric* partition, |
| 1382 const char* partitionName, |
| 1383 bool isLightDump, |
| 1384 PartitionStatsDumper* partitionStatsDumper) { |
| 1385 PartitionBucketMemoryStats bucketStats[kGenericNumBuckets]; |
| 1386 static const size_t kMaxReportableDirectMaps = 4096; |
| 1387 uint32_t directMapLengths[kMaxReportableDirectMaps]; |
| 1388 size_t numDirectMappedAllocations = 0; |
| 1389 |
| 1390 { |
| 1391 SpinLock::Guard guard(partition->lock); |
| 1392 |
| 1393 for (size_t i = 0; i < kGenericNumBuckets; ++i) { |
| 1394 const PartitionBucket* bucket = &partition->buckets[i]; |
| 1395 // Don't report the pseudo buckets that the generic allocator sets up in |
| 1396 // order to preserve a fast size->bucket map (see |
| 1397 // partitionAllocGenericInit for details). |
| 1398 if (!bucket->activePagesHead) |
| 1399 bucketStats[i].isValid = false; |
| 1400 else |
| 1401 partitionDumpBucketStats(&bucketStats[i], bucket); |
| 1402 } |
| 1403 |
| 1404 for (PartitionDirectMapExtent* extent = partition->directMapList; extent; |
| 1405 extent = extent->nextExtent) { |
| 1406 ASSERT(!extent->nextExtent || extent->nextExtent->prevExtent == extent); |
| 1407 directMapLengths[numDirectMappedAllocations] = extent->bucket->slotSize; |
| 1408 ++numDirectMappedAllocations; |
| 1409 if (numDirectMappedAllocations == kMaxReportableDirectMaps) |
| 1410 break; |
| 1411 } |
| 1412 } |
| 1413 |
| 1414 // partitionsDumpBucketStats is called after collecting stats because it |
| 1415 // can try to allocate using PartitionAllocGeneric and it can't obtain the |
| 1416 // lock. |
| 1417 PartitionMemoryStats partitionStats = {0}; |
| 1418 partitionStats.totalMmappedBytes = partition->totalSizeOfSuperPages + |
| 1419 partition->totalSizeOfDirectMappedPages; |
| 1420 partitionStats.totalCommittedBytes = partition->totalSizeOfCommittedPages; |
| 1421 for (size_t i = 0; i < kGenericNumBuckets; ++i) { |
| 1422 if (bucketStats[i].isValid) { |
| 1423 partitionStats.totalResidentBytes += bucketStats[i].residentBytes; |
| 1424 partitionStats.totalActiveBytes += bucketStats[i].activeBytes; |
| 1425 partitionStats.totalDecommittableBytes += |
| 1426 bucketStats[i].decommittableBytes; |
| 1427 partitionStats.totalDiscardableBytes += bucketStats[i].discardableBytes; |
| 1428 if (!isLightDump) |
| 1429 partitionStatsDumper->partitionsDumpBucketStats(partitionName, |
| 1430 &bucketStats[i]); |
| 1431 } |
| 1432 } |
| 1433 |
| 1434 size_t directMappedAllocationsTotalSize = 0; |
| 1435 for (size_t i = 0; i < numDirectMappedAllocations; ++i) { |
| 1436 PartitionBucketMemoryStats stats; |
| 1437 memset(&stats, '\0', sizeof(stats)); |
| 1438 stats.isValid = true; |
| 1439 stats.isDirectMap = true; |
| 1440 stats.numFullPages = 1; |
| 1441 uint32_t size = directMapLengths[i]; |
| 1442 stats.allocatedPageSize = size; |
| 1443 stats.bucketSlotSize = size; |
| 1444 stats.activeBytes = size; |
| 1445 stats.residentBytes = size; |
| 1446 directMappedAllocationsTotalSize += size; |
| 1447 partitionStatsDumper->partitionsDumpBucketStats(partitionName, &stats); |
| 1448 } |
| 1449 partitionStats.totalResidentBytes += directMappedAllocationsTotalSize; |
| 1450 partitionStats.totalActiveBytes += directMappedAllocationsTotalSize; |
| 1451 partitionStatsDumper->partitionDumpTotals(partitionName, &partitionStats); |
| 1452 } |
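Both dump functions feed a PartitionStatsDumper. A minimal hypothetical dumper that just prints what it receives; the method signatures below are inferred from the call sites in this file and may not match the real interface exactly:

    #include <stdio.h>

    class PrintingStatsDumper : public WTF::PartitionStatsDumper {
    public:
        // Mirrors partitionStatsDumper->partitionDumpTotals(...) above.
        void partitionDumpTotals(const char* partitionName,
                                 const WTF::PartitionMemoryStats* stats) override {
            printf("%s: resident=%zu active=%zu committed=%zu\n", partitionName,
                   static_cast<size_t>(stats->totalResidentBytes),
                   static_cast<size_t>(stats->totalActiveBytes),
                   static_cast<size_t>(stats->totalCommittedBytes));
        }
        // Mirrors partitionStatsDumper->partitionsDumpBucketStats(...) above.
        void partitionsDumpBucketStats(
            const char* partitionName,
            const WTF::PartitionBucketMemoryStats* stats) override {
            printf("%s: bucket slotSize=%zu resident=%zu\n", partitionName,
                   static_cast<size_t>(stats->bucketSlotSize),
                   static_cast<size_t>(stats->residentBytes));
        }
    };

Passing an instance as the final argument of partitionDumpStatsGeneric with isLightDump == false would then report each valid bucket followed by the partition totals.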
| 1453 |
| 1454 void partitionDumpStats(PartitionRoot* partition, |
| 1455 const char* partitionName, |
| 1456 bool isLightDump, |
| 1457 PartitionStatsDumper* partitionStatsDumper) { |
| 1458 static const size_t kMaxReportableBuckets = 4096 / sizeof(void*); |
| 1459 PartitionBucketMemoryStats memoryStats[kMaxReportableBuckets]; |
| 1460 const size_t partitionNumBuckets = partition->numBuckets; |
| 1461 ASSERT(partitionNumBuckets <= kMaxReportableBuckets); |
| 1462 |
| 1463 for (size_t i = 0; i < partitionNumBuckets; ++i) |
| 1464 partitionDumpBucketStats(&memoryStats[i], &partition->buckets()[i]); |
| 1465 |
| 1466 // partitionsDumpBucketStats is called after collecting stats because it |
| 1467 // can use PartitionAlloc to allocate and this can affect the statistics. |
| 1468 PartitionMemoryStats partitionStats = {0}; |
| 1469 partitionStats.totalMmappedBytes = partition->totalSizeOfSuperPages; |
| 1470 partitionStats.totalCommittedBytes = partition->totalSizeOfCommittedPages; |
| 1471 ASSERT(!partition->totalSizeOfDirectMappedPages); |
| 1472 for (size_t i = 0; i < partitionNumBuckets; ++i) { |
| 1473 if (memoryStats[i].isValid) { |
| 1474 partitionStats.totalResidentBytes += memoryStats[i].residentBytes; |
| 1475 partitionStats.totalActiveBytes += memoryStats[i].activeBytes; |
| 1476 partitionStats.totalDecommittableBytes += |
| 1477 memoryStats[i].decommittableBytes; |
| 1478 partitionStats.totalDiscardableBytes += memoryStats[i].discardableBytes; |
| 1479 if (!isLightDump) |
| 1480 partitionStatsDumper->partitionsDumpBucketStats(partitionName, |
| 1481 &memoryStats[i]); |
| 1482 } |
| 1483 } |
| 1484 partitionStatsDumper->partitionDumpTotals(partitionName, &partitionStats); |
| 1485 } |
| 1486 |
| 1487 } // namespace WTF |
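A pattern worth noting in both dump paths above: statistics are collected while the spin lock is held, but the dumper callbacks only run after the lock is released, because a callback may allocate and re-enter the allocator. The same discipline in a self-contained sketch (std::mutex and all names are illustrative, not CL code):

    #include <cstddef>
    #include <mutex>
    #include <vector>

    struct Snapshot { size_t residentBytes; };

    // Snapshot-then-report: never invoke a callback that might re-enter the
    // locked subsystem while the lock is still held.
    template <typename Reporter>
    void dumpStats(std::mutex& lock,
                   const std::vector<Snapshot>& liveStats,
                   Reporter report) {
        std::vector<Snapshot> copy;
        {
            std::lock_guard<std::mutex> guard(lock);
            copy = liveStats;  // collect under the lock
        }
        for (const Snapshot& s : copy)
            report(s);  // report lock-free; the callback may allocate freely
    }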