Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 /* | 1 // Copyright (c) 2013 The Chromium Authors. All rights reserved. |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 * | 3 // found in the LICENSE file. |
| 4 * Redistribution and use in source and binary forms, with or without | |
| 5 * modification, are permitted provided that the following conditions are | |
| 6 * met: | |
| 7 * | |
| 8 * * Redistributions of source code must retain the above copyright | |
| 9 * notice, this list of conditions and the following disclaimer. | |
| 10 * * Redistributions in binary form must reproduce the above | |
| 11 * copyright notice, this list of conditions and the following disclaimer | |
| 12 * in the documentation and/or other materials provided with the | |
| 13 * distribution. | |
| 14 * * Neither the name of Google Inc. nor the names of its | |
| 15 * contributors may be used to endorse or promote products derived from | |
| 16 * this software without specific prior written permission. | |
| 17 * | |
| 18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |
| 19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |
| 20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | |
| 21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | |
| 22 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |
| 23 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | |
| 24 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |
| 25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |
| 26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |
| 27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |
| 28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
| 29 */ | |
| 30 | 4 |
| 31 #include "wtf/allocator/PartitionAlloc.h" | 5 #include "base/allocator/partition_allocator/partition_alloc.h" |
| 32 | 6 |
| 33 #include <string.h> | 7 #include <string.h> |
| 34 | 8 |
| 9 #include "base/compiler_specific.h" | |
| 10 #include "base/synchronization/spin_lock.h" | |
| 11 | |
| 35 #ifndef NDEBUG | 12 #ifndef NDEBUG |
| 36 #include <stdio.h> | 13 #include <stdio.h> |
| 37 #endif | 14 #endif |
| 38 | 15 |
| 39 // Two partition pages are used as guard / metadata page so make sure the super | 16 // Two partition pages are used as guard / metadata page so make sure the super |
| 40 // page size is bigger. | 17 // page size is bigger. |
| 41 static_assert(WTF::kPartitionPageSize * 4 <= WTF::kSuperPageSize, | 18 static_assert(base::kPartitionPageSize * 4 <= base::kSuperPageSize, |
| 42 "ok super page size"); | 19 "ok super page size"); |
| 43 static_assert(!(WTF::kSuperPageSize % WTF::kPartitionPageSize), | 20 static_assert(!(base::kSuperPageSize % base::kPartitionPageSize), |
| 44 "ok super page multiple"); | 21 "ok super page multiple"); |
| 45 // Four system pages gives us room to hack out a still-guard-paged piece | 22 // Four system pages gives us room to hack out a still-guard-paged piece |
| 46 // of metadata in the middle of a guard partition page. | 23 // of metadata in the middle of a guard partition page. |
| 47 static_assert(WTF::kSystemPageSize * 4 <= WTF::kPartitionPageSize, | 24 static_assert(base::kSystemPageSize * 4 <= base::kPartitionPageSize, |
| 48 "ok partition page size"); | 25 "ok partition page size"); |
| 49 static_assert(!(WTF::kPartitionPageSize % WTF::kSystemPageSize), | 26 static_assert(!(base::kPartitionPageSize % base::kSystemPageSize), |
| 50 "ok partition page multiple"); | 27 "ok partition page multiple"); |
| 51 static_assert(sizeof(WTF::PartitionPage) <= WTF::kPageMetadataSize, | 28 static_assert(sizeof(base::PartitionPage) <= base::kPageMetadataSize, |
| 52 "PartitionPage should not be too big"); | 29 "PartitionPage should not be too big"); |
| 53 static_assert(sizeof(WTF::PartitionBucket) <= WTF::kPageMetadataSize, | 30 static_assert(sizeof(base::PartitionBucket) <= base::kPageMetadataSize, |
| 54 "PartitionBucket should not be too big"); | 31 "PartitionBucket should not be too big"); |
| 55 static_assert(sizeof(WTF::PartitionSuperPageExtentEntry) <= | 32 static_assert(sizeof(base::PartitionSuperPageExtentEntry) <= |
| 56 WTF::kPageMetadataSize, | 33 base::kPageMetadataSize, |
| 57 "PartitionSuperPageExtentEntry should not be too big"); | 34 "PartitionSuperPageExtentEntry should not be too big"); |
| 58 static_assert(WTF::kPageMetadataSize * WTF::kNumPartitionPagesPerSuperPage <= | 35 static_assert(base::kPageMetadataSize * base::kNumPartitionPagesPerSuperPage <= |
| 59 WTF::kSystemPageSize, | 36 base::kSystemPageSize, |
| 60 "page metadata fits in hole"); | 37 "page metadata fits in hole"); |
| 61 // Check that some of our zanier calculations worked out as expected. | 38 // Check that some of our zanier calculations worked out as expected. |
| 62 static_assert(WTF::kGenericSmallestBucket == 8, "generic smallest bucket"); | 39 static_assert(base::kGenericSmallestBucket == 8, "generic smallest bucket"); |
| 63 static_assert(WTF::kGenericMaxBucketed == 983040, "generic max bucketed"); | 40 static_assert(base::kGenericMaxBucketed == 983040, "generic max bucketed"); |
| 64 static_assert(WTF::kMaxSystemPagesPerSlotSpan < (1 << 8), | 41 static_assert(base::kMaxSystemPagesPerSlotSpan < (1 << 8), |
| 65 "System pages per slot span must be less than 256."); | 42 "System pages per slot span must be less than 256."); |
| 66 | 43 |
| 67 namespace WTF { | 44 namespace base { |
| 68 | 45 |
| 69 SpinLock PartitionRootBase::gInitializedLock; | 46 subtle::SpinLock PartitionRootBase::gInitializedLock; |
| 70 bool PartitionRootBase::gInitialized = false; | 47 bool PartitionRootBase::gInitialized = false; |
| 71 PartitionPage PartitionRootBase::gSeedPage; | 48 PartitionPage PartitionRootBase::gSeedPage; |
| 72 PartitionBucket PartitionRootBase::gPagedBucket; | 49 PartitionBucket PartitionRootBase::gPagedBucket; |
| 73 void (*PartitionRootBase::gOomHandlingFunction)() = nullptr; | 50 void (*PartitionRootBase::gOomHandlingFunction)() = nullptr; |
| 74 PartitionAllocHooks::AllocationHook* PartitionAllocHooks::m_allocationHook = | 51 PartitionAllocHooks::AllocationHook* PartitionAllocHooks::m_allocationHook = |
| 75 nullptr; | 52 nullptr; |
| 76 PartitionAllocHooks::FreeHook* PartitionAllocHooks::m_freeHook = nullptr; | 53 PartitionAllocHooks::FreeHook* PartitionAllocHooks::m_freeHook = nullptr; |
| 77 | 54 |
| 78 static uint8_t partitionBucketNumSystemPages(size_t size) { | 55 static uint8_t partitionBucketNumSystemPages(size_t size) { |
| 79 // This works out reasonably for the current bucket sizes of the generic | 56 // This works out reasonably for the current bucket sizes of the generic |
| 80 // allocator, and the current values of partition page size and constants. | 57 // allocator, and the current values of partition page size and constants. |
| 81 // Specifically, we have enough room to always pack the slots perfectly into | 58 // Specifically, we have enough room to always pack the slots perfectly into |
| 82 // some number of system pages. The only waste is the waste associated with | 59 // some number of system pages. The only waste is the waste associated with |
| 83 // unfaulted pages (i.e. wasted address space). | 60 // unfaulted pages (i.e. wasted address space). |
| 84 // TODO: we end up using a lot of system pages for very small sizes. For | 61 // TODO: we end up using a lot of system pages for very small sizes. For |
| 85 // example, we'll use 12 system pages for slot size 24. The slot size is | 62 // example, we'll use 12 system pages for slot size 24. The slot size is |
| 86 // so small that the waste would be tiny with just 4, or 1, system pages. | 63 // so small that the waste would be tiny with just 4, or 1, system pages. |
| 87 // Later, we can investigate whether there are anti-fragmentation benefits | 64 // Later, we can investigate whether there are anti-fragmentation benefits |
| 88 // to using fewer system pages. | 65 // to using fewer system pages. |
| 89 double bestWasteRatio = 1.0f; | 66 double bestWasteRatio = 1.0f; |
| 90 uint16_t bestPages = 0; | 67 uint16_t bestPages = 0; |
| 91 if (size > kMaxSystemPagesPerSlotSpan * kSystemPageSize) { | 68 if (size > kMaxSystemPagesPerSlotSpan * kSystemPageSize) { |
| 92 ASSERT(!(size % kSystemPageSize)); | 69 DCHECK(!(size % kSystemPageSize)); |
| 93 bestPages = static_cast<uint16_t>(size / kSystemPageSize); | 70 bestPages = static_cast<uint16_t>(size / kSystemPageSize); |
| 94 RELEASE_ASSERT(bestPages < (1 << 8)); | 71 CHECK(bestPages < (1 << 8)); |
| 95 return static_cast<uint8_t>(bestPages); | 72 return static_cast<uint8_t>(bestPages); |
| 96 } | 73 } |
| 97 ASSERT(size <= kMaxSystemPagesPerSlotSpan * kSystemPageSize); | 74 DCHECK(size <= kMaxSystemPagesPerSlotSpan * kSystemPageSize); |
| 98 for (uint16_t i = kNumSystemPagesPerPartitionPage - 1; | 75 for (uint16_t i = kNumSystemPagesPerPartitionPage - 1; |
| 99 i <= kMaxSystemPagesPerSlotSpan; ++i) { | 76 i <= kMaxSystemPagesPerSlotSpan; ++i) { |
| 100 size_t pageSize = kSystemPageSize * i; | 77 size_t pageSize = kSystemPageSize * i; |
| 101 size_t numSlots = pageSize / size; | 78 size_t numSlots = pageSize / size; |
| 102 size_t waste = pageSize - (numSlots * size); | 79 size_t waste = pageSize - (numSlots * size); |
| 103 // Leaving a page unfaulted is not free; the page will occupy an empty page | 80 // Leaving a page unfaulted is not free; the page will occupy an empty page |
| 104 // table entry. Make a simple attempt to account for that. | 81 // table entry. Make a simple attempt to account for that. |
| 105 size_t numRemainderPages = i & (kNumSystemPagesPerPartitionPage - 1); | 82 size_t numRemainderPages = i & (kNumSystemPagesPerPartitionPage - 1); |
| 106 size_t numUnfaultedPages = | 83 size_t numUnfaultedPages = |
| 107 numRemainderPages | 84 numRemainderPages |
| 108 ? (kNumSystemPagesPerPartitionPage - numRemainderPages) | 85 ? (kNumSystemPagesPerPartitionPage - numRemainderPages) |
| 109 : 0; | 86 : 0; |
| 110 waste += sizeof(void*) * numUnfaultedPages; | 87 waste += sizeof(void*) * numUnfaultedPages; |
| 111 double wasteRatio = (double)waste / (double)pageSize; | 88 double wasteRatio = (double)waste / (double)pageSize; |
| 112 if (wasteRatio < bestWasteRatio) { | 89 if (wasteRatio < bestWasteRatio) { |
| 113 bestWasteRatio = wasteRatio; | 90 bestWasteRatio = wasteRatio; |
| 114 bestPages = i; | 91 bestPages = i; |
| 115 } | 92 } |
| 116 } | 93 } |
| 117 ASSERT(bestPages > 0); | 94 DCHECK(bestPages > 0); |
| 118 RELEASE_ASSERT(bestPages <= kMaxSystemPagesPerSlotSpan); | 95 CHECK(bestPages <= kMaxSystemPagesPerSlotSpan); |
| 119 return static_cast<uint8_t>(bestPages); | 96 return static_cast<uint8_t>(bestPages); |
| 120 } | 97 } |
| 121 | 98 |
| 122 static void partitionAllocBaseInit(PartitionRootBase* root) { | 99 static void partitionAllocBaseInit(PartitionRootBase* root) { |
| 123 ASSERT(!root->initialized); | 100 DCHECK(!root->initialized); |
| 124 { | 101 { |
| 125 SpinLock::Guard guard(PartitionRootBase::gInitializedLock); | 102 subtle::SpinLock::Guard guard(PartitionRootBase::gInitializedLock); |
| 126 if (!PartitionRootBase::gInitialized) { | 103 if (!PartitionRootBase::gInitialized) { |
| 127 PartitionRootBase::gInitialized = true; | 104 PartitionRootBase::gInitialized = true; |
| 128 // We mark the seed page as free to make sure it is skipped by our | 105 // We mark the seed page as free to make sure it is skipped by our |
| 129 // logic to find a new active page. | 106 // logic to find a new active page. |
| 130 PartitionRootBase::gPagedBucket.activePagesHead = | 107 PartitionRootBase::gPagedBucket.activePagesHead = |
| 131 &PartitionRootGeneric::gSeedPage; | 108 &PartitionRootGeneric::gSeedPage; |
| 132 } | 109 } |
| 133 } | 110 } |
| 134 | 111 |
| 135 root->initialized = true; | 112 root->initialized = true; |
| (...skipping 18 matching lines...) Expand all Loading... | |
| 154 PartitionRootBase* root) { | 131 PartitionRootBase* root) { |
| 155 bucket->activePagesHead = &PartitionRootGeneric::gSeedPage; | 132 bucket->activePagesHead = &PartitionRootGeneric::gSeedPage; |
| 156 bucket->emptyPagesHead = 0; | 133 bucket->emptyPagesHead = 0; |
| 157 bucket->decommittedPagesHead = 0; | 134 bucket->decommittedPagesHead = 0; |
| 158 bucket->numFullPages = 0; | 135 bucket->numFullPages = 0; |
| 159 bucket->numSystemPagesPerSlotSpan = | 136 bucket->numSystemPagesPerSlotSpan = |
| 160 partitionBucketNumSystemPages(bucket->slotSize); | 137 partitionBucketNumSystemPages(bucket->slotSize); |
| 161 } | 138 } |
| 162 | 139 |
| 163 void partitionAllocGlobalInit(void (*oomHandlingFunction)()) { | 140 void partitionAllocGlobalInit(void (*oomHandlingFunction)()) { |
| 164 ASSERT(oomHandlingFunction); | 141 DCHECK(oomHandlingFunction); |
| 165 PartitionRootBase::gOomHandlingFunction = oomHandlingFunction; | 142 PartitionRootBase::gOomHandlingFunction = oomHandlingFunction; |
| 166 } | 143 } |
| 167 | 144 |
| 168 void partitionAllocInit(PartitionRoot* root, | 145 void partitionAllocInit(PartitionRoot* root, |
| 169 size_t numBuckets, | 146 size_t numBuckets, |
| 170 size_t maxAllocation) { | 147 size_t maxAllocation) { |
| 171 partitionAllocBaseInit(root); | 148 partitionAllocBaseInit(root); |
| 172 | 149 |
| 173 root->numBuckets = numBuckets; | 150 root->numBuckets = numBuckets; |
| 174 root->maxAllocation = maxAllocation; | 151 root->maxAllocation = maxAllocation; |
| 175 size_t i; | 152 size_t i; |
| 176 for (i = 0; i < root->numBuckets; ++i) { | 153 for (i = 0; i < root->numBuckets; ++i) { |
| 177 PartitionBucket* bucket = &root->buckets()[i]; | 154 PartitionBucket* bucket = &root->buckets()[i]; |
| 178 if (!i) | 155 if (!i) |
| 179 bucket->slotSize = kAllocationGranularity; | 156 bucket->slotSize = kAllocationGranularity; |
| 180 else | 157 else |
| 181 bucket->slotSize = i << kBucketShift; | 158 bucket->slotSize = i << kBucketShift; |
| 182 partitionBucketInitBase(bucket, root); | 159 partitionBucketInitBase(bucket, root); |
| 183 } | 160 } |
| 184 } | 161 } |
| 185 | 162 |
| 186 void partitionAllocGenericInit(PartitionRootGeneric* root) { | 163 void partitionAllocGenericInit(PartitionRootGeneric* root) { |
| 187 SpinLock::Guard guard(root->lock); | 164 subtle::SpinLock::Guard guard(root->lock); |
| 188 | 165 |
| 189 partitionAllocBaseInit(root); | 166 partitionAllocBaseInit(root); |
| 190 | 167 |
| 191 // Precalculate some shift and mask constants used in the hot path. | 168 // Precalculate some shift and mask constants used in the hot path. |
| 192 // Example: malloc(41) == 101001 binary. | 169 // Example: malloc(41) == 101001 binary. |
| 193 // Order is 6 (1 << 6-1)==32 is highest bit set. | 170 // Order is 6 (1 << 6-1)==32 is highest bit set. |
| 194 // orderIndex is the next three MSB == 010 == 2. | 171 // orderIndex is the next three MSB == 010 == 2. |
| 195 // subOrderIndexMask is a mask for the remaining bits == 11 (masking to 01 for | 172 // subOrderIndexMask is a mask for the remaining bits == 11 (masking to 01 for |
| 196 // the subOrderIndex). | 173 // the subOrderIndex). |
| 197 size_t order; | 174 size_t order; |
| (...skipping 32 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 230 bucket->slotSize = currentSize; | 207 bucket->slotSize = currentSize; |
| 231 partitionBucketInitBase(bucket, root); | 208 partitionBucketInitBase(bucket, root); |
| 232 // Disable pseudo buckets so that touching them faults. | 209 // Disable pseudo buckets so that touching them faults. |
| 233 if (currentSize % kGenericSmallestBucket) | 210 if (currentSize % kGenericSmallestBucket) |
| 234 bucket->activePagesHead = 0; | 211 bucket->activePagesHead = 0; |
| 235 currentSize += currentIncrement; | 212 currentSize += currentIncrement; |
| 236 ++bucket; | 213 ++bucket; |
| 237 } | 214 } |
| 238 currentIncrement <<= 1; | 215 currentIncrement <<= 1; |
| 239 } | 216 } |
| 240 ASSERT(currentSize == 1 << kGenericMaxBucketedOrder); | 217 DCHECK(currentSize == 1 << kGenericMaxBucketedOrder); |
| 241 ASSERT(bucket == &root->buckets[0] + kGenericNumBuckets); | 218 DCHECK(bucket == &root->buckets[0] + kGenericNumBuckets); |
| 242 | 219 |
| 243 // Then set up the fast size -> bucket lookup table. | 220 // Then set up the fast size -> bucket lookup table. |
| 244 bucket = &root->buckets[0]; | 221 bucket = &root->buckets[0]; |
| 245 PartitionBucket** bucketPtr = &root->bucketLookups[0]; | 222 PartitionBucket** bucketPtr = &root->bucketLookups[0]; |
| 246 for (order = 0; order <= kBitsPerSizet; ++order) { | 223 for (order = 0; order <= kBitsPerSizet; ++order) { |
| 247 for (j = 0; j < kGenericNumBucketsPerOrder; ++j) { | 224 for (j = 0; j < kGenericNumBucketsPerOrder; ++j) { |
| 248 if (order < kGenericMinBucketedOrder) { | 225 if (order < kGenericMinBucketedOrder) { |
| 249 // Use the bucket of the finest granularity for malloc(0) etc. | 226 // Use the bucket of the finest granularity for malloc(0) etc. |
| 250 *bucketPtr++ = &root->buckets[0]; | 227 *bucketPtr++ = &root->buckets[0]; |
| 251 } else if (order > kGenericMaxBucketedOrder) { | 228 } else if (order > kGenericMaxBucketedOrder) { |
| 252 *bucketPtr++ = &PartitionRootGeneric::gPagedBucket; | 229 *bucketPtr++ = &PartitionRootGeneric::gPagedBucket; |
| 253 } else { | 230 } else { |
| 254 PartitionBucket* validBucket = bucket; | 231 PartitionBucket* validBucket = bucket; |
| 255 // Skip over invalid buckets. | 232 // Skip over invalid buckets. |
| 256 while (validBucket->slotSize % kGenericSmallestBucket) | 233 while (validBucket->slotSize % kGenericSmallestBucket) |
| 257 validBucket++; | 234 validBucket++; |
| 258 *bucketPtr++ = validBucket; | 235 *bucketPtr++ = validBucket; |
| 259 bucket++; | 236 bucket++; |
| 260 } | 237 } |
| 261 } | 238 } |
| 262 } | 239 } |
| 263 ASSERT(bucket == &root->buckets[0] + kGenericNumBuckets); | 240 DCHECK(bucket == &root->buckets[0] + kGenericNumBuckets); |
| 264 ASSERT(bucketPtr == | 241 DCHECK(bucketPtr == |
| 265 &root->bucketLookups[0] + | 242 &root->bucketLookups[0] + |
| 266 ((kBitsPerSizet + 1) * kGenericNumBucketsPerOrder)); | 243 ((kBitsPerSizet + 1) * kGenericNumBucketsPerOrder)); |
| 267 // And there's one last bucket lookup that will be hit for e.g. malloc(-1), | 244 // And there's one last bucket lookup that will be hit for e.g. malloc(-1), |
| 268 // which tries to overflow to a non-existent order. | 245 // which tries to overflow to a non-existent order. |
| 269 *bucketPtr = &PartitionRootGeneric::gPagedBucket; | 246 *bucketPtr = &PartitionRootGeneric::gPagedBucket; |
| 270 } | 247 } |
| 271 | 248 |
| 272 static bool partitionAllocShutdownBucket(PartitionBucket* bucket) { | 249 static bool partitionAllocShutdownBucket(PartitionBucket* bucket) { |
| 273 // Failure here indicates a memory leak. | 250 // Failure here indicates a memory leak. |
| 274 bool foundLeak = bucket->numFullPages; | 251 bool foundLeak = bucket->numFullPages; |
| 275 for (PartitionPage* page = bucket->activePagesHead; page; | 252 for (PartitionPage* page = bucket->activePagesHead; page; |
| 276 page = page->nextPage) | 253 page = page->nextPage) |
| 277 foundLeak |= (page->numAllocatedSlots > 0); | 254 foundLeak |= (page->numAllocatedSlots > 0); |
| 278 return foundLeak; | 255 return foundLeak; |
| 279 } | 256 } |
| 280 | 257 |
| 281 static bool partitionAllocBaseShutdown(PartitionRootBase* root) { | 258 static bool partitionAllocBaseShutdown(PartitionRootBase* root) { |
| 282 ASSERT(root->initialized); | 259 DCHECK(root->initialized); |
| 283 root->initialized = false; | 260 root->initialized = false; |
| 284 | 261 |
| 285 // Now that we've examined all partition pages in all buckets, it's safe | 262 // Now that we've examined all partition pages in all buckets, it's safe |
| 286 // to free all our super pages. Since the super page extent entries are | 263 // to free all our super pages. Since the super page extent entries are |
| 287 // stored in the super pages, we need to be careful not to access them | 264 // stored in the super pages, we need to be careful not to access them |
| 288 // after we've released the corresponding super page. | 265 // after we've released the corresponding super page. |
| 289 PartitionSuperPageExtentEntry* entry = root->firstExtent; | 266 PartitionSuperPageExtentEntry* entry = root->firstExtent; |
| 290 while (entry) { | 267 while (entry) { |
| 291 PartitionSuperPageExtentEntry* nextEntry = entry->next; | 268 PartitionSuperPageExtentEntry* nextEntry = entry->next; |
| 292 char* superPage = entry->superPageBase; | 269 char* superPage = entry->superPageBase; |
| (...skipping 12 matching lines...) Expand all Loading... | |
| 305 size_t i; | 282 size_t i; |
| 306 for (i = 0; i < root->numBuckets; ++i) { | 283 for (i = 0; i < root->numBuckets; ++i) { |
| 307 PartitionBucket* bucket = &root->buckets()[i]; | 284 PartitionBucket* bucket = &root->buckets()[i]; |
| 308 foundLeak |= partitionAllocShutdownBucket(bucket); | 285 foundLeak |= partitionAllocShutdownBucket(bucket); |
| 309 } | 286 } |
| 310 foundLeak |= partitionAllocBaseShutdown(root); | 287 foundLeak |= partitionAllocBaseShutdown(root); |
| 311 return !foundLeak; | 288 return !foundLeak; |
| 312 } | 289 } |
| 313 | 290 |
| 314 bool partitionAllocGenericShutdown(PartitionRootGeneric* root) { | 291 bool partitionAllocGenericShutdown(PartitionRootGeneric* root) { |
| 315 SpinLock::Guard guard(root->lock); | 292 subtle::SpinLock::Guard guard(root->lock); |
| 316 bool foundLeak = false; | 293 bool foundLeak = false; |
| 317 size_t i; | 294 size_t i; |
| 318 for (i = 0; i < kGenericNumBuckets; ++i) { | 295 for (i = 0; i < kGenericNumBuckets; ++i) { |
| 319 PartitionBucket* bucket = &root->buckets[i]; | 296 PartitionBucket* bucket = &root->buckets[i]; |
| 320 foundLeak |= partitionAllocShutdownBucket(bucket); | 297 foundLeak |= partitionAllocShutdownBucket(bucket); |
| 321 } | 298 } |
| 322 foundLeak |= partitionAllocBaseShutdown(root); | 299 foundLeak |= partitionAllocBaseShutdown(root); |
| 323 return !foundLeak; | 300 return !foundLeak; |
| 324 } | 301 } |
| 325 | 302 |
| 326 #if !CPU(64BIT) | 303 #if !CPU(64BIT) |
| 327 static NEVER_INLINE void partitionOutOfMemoryWithLotsOfUncommitedPages() { | 304 static NOINLINE void partitionOutOfMemoryWithLotsOfUncommitedPages() { |
| 328 OOM_CRASH(); | 305 OOM_CRASH(); |
| 329 } | 306 } |
| 330 #endif | 307 #endif |
| 331 | 308 |
| 332 static NEVER_INLINE void partitionOutOfMemory(const PartitionRootBase* root) { | 309 static NOINLINE void partitionOutOfMemory(const PartitionRootBase* root) { |
| 333 #if !CPU(64BIT) | 310 #if !CPU(64BIT) |
| 334 // Check whether this OOM is due to a lot of super pages that are allocated | 311 // Check whether this OOM is due to a lot of super pages that are allocated |
| 335 // but not committed, probably due to http://crbug.com/421387. | 312 // but not committed, probably due to http://crbug.com/421387. |
| 336 if (root->totalSizeOfSuperPages + root->totalSizeOfDirectMappedPages - | 313 if (root->totalSizeOfSuperPages + root->totalSizeOfDirectMappedPages - |
| 337 root->totalSizeOfCommittedPages > | 314 root->totalSizeOfCommittedPages > |
| 338 kReasonableSizeOfUnusedPages) { | 315 kReasonableSizeOfUnusedPages) { |
| 339 partitionOutOfMemoryWithLotsOfUncommitedPages(); | 316 partitionOutOfMemoryWithLotsOfUncommitedPages(); |
| 340 } | 317 } |
| 341 #endif | 318 #endif |
| 342 if (PartitionRootBase::gOomHandlingFunction) | 319 if (PartitionRootBase::gOomHandlingFunction) |
| 343 (*PartitionRootBase::gOomHandlingFunction)(); | 320 (*PartitionRootBase::gOomHandlingFunction)(); |
| 344 OOM_CRASH(); | 321 OOM_CRASH(); |
| 345 } | 322 } |
| 346 | 323 |
| 347 static NEVER_INLINE void partitionExcessiveAllocationSize() { | 324 static NOINLINE void partitionExcessiveAllocationSize() { |
| 348 OOM_CRASH(); | 325 OOM_CRASH(); |
| 349 } | 326 } |
| 350 | 327 |
| 351 static NEVER_INLINE void partitionBucketFull() { | 328 static NOINLINE void partitionBucketFull() { |
| 352 OOM_CRASH(); | 329 OOM_CRASH(); |
| 353 } | 330 } |
| 354 | 331 |
| 355 // partitionPageStateIs* | 332 // partitionPageStateIs* |
| 356 // Note that it's only valid to call these functions on pages found on one of | 333 // Note that it's only valid to call these functions on pages found on one of |
| 357 // the page lists. Specifically, you can't call these functions on full pages | 334 // the page lists. Specifically, you can't call these functions on full pages |
| 358 // that were detached from the active list. | 335 // that were detached from the active list. |
| 359 static bool ALWAYS_INLINE | 336 static bool ALWAYS_INLINE |
| 360 partitionPageStateIsActive(const PartitionPage* page) { | 337 partitionPageStateIsActive(const PartitionPage* page) { |
| 361 ASSERT(page != &PartitionRootGeneric::gSeedPage); | 338 DCHECK(page != &PartitionRootGeneric::gSeedPage); |
| 362 ASSERT(!page->pageOffset); | 339 DCHECK(!page->pageOffset); |
| 363 return (page->numAllocatedSlots > 0 && | 340 return (page->numAllocatedSlots > 0 && |
| 364 (page->freelistHead || page->numUnprovisionedSlots)); | 341 (page->freelistHead || page->numUnprovisionedSlots)); |
| 365 } | 342 } |
| 366 | 343 |
| 367 static bool ALWAYS_INLINE partitionPageStateIsFull(const PartitionPage* page) { | 344 static bool ALWAYS_INLINE partitionPageStateIsFull(const PartitionPage* page) { |
| 368 ASSERT(page != &PartitionRootGeneric::gSeedPage); | 345 DCHECK(page != &PartitionRootGeneric::gSeedPage); |
| 369 ASSERT(!page->pageOffset); | 346 DCHECK(!page->pageOffset); |
| 370 bool ret = (page->numAllocatedSlots == partitionBucketSlots(page->bucket)); | 347 bool ret = (page->numAllocatedSlots == partitionBucketSlots(page->bucket)); |
| 371 if (ret) { | 348 if (ret) { |
| 372 ASSERT(!page->freelistHead); | 349 DCHECK(!page->freelistHead); |
| 373 ASSERT(!page->numUnprovisionedSlots); | 350 DCHECK(!page->numUnprovisionedSlots); |
| 374 } | 351 } |
| 375 return ret; | 352 return ret; |
| 376 } | 353 } |
| 377 | 354 |
| 378 static bool ALWAYS_INLINE partitionPageStateIsEmpty(const PartitionPage* page) { | 355 static bool ALWAYS_INLINE partitionPageStateIsEmpty(const PartitionPage* page) { |
| 379 ASSERT(page != &PartitionRootGeneric::gSeedPage); | 356 DCHECK(page != &PartitionRootGeneric::gSeedPage); |
| 380 ASSERT(!page->pageOffset); | 357 DCHECK(!page->pageOffset); |
| 381 return (!page->numAllocatedSlots && page->freelistHead); | 358 return (!page->numAllocatedSlots && page->freelistHead); |
| 382 } | 359 } |
| 383 | 360 |
| 384 static bool ALWAYS_INLINE | 361 static bool ALWAYS_INLINE |
| 385 partitionPageStateIsDecommitted(const PartitionPage* page) { | 362 partitionPageStateIsDecommitted(const PartitionPage* page) { |
| 386 ASSERT(page != &PartitionRootGeneric::gSeedPage); | 363 DCHECK(page != &PartitionRootGeneric::gSeedPage); |
| 387 ASSERT(!page->pageOffset); | 364 DCHECK(!page->pageOffset); |
| 388 bool ret = (!page->numAllocatedSlots && !page->freelistHead); | 365 bool ret = (!page->numAllocatedSlots && !page->freelistHead); |
| 389 if (ret) { | 366 if (ret) { |
| 390 ASSERT(!page->numUnprovisionedSlots); | 367 DCHECK(!page->numUnprovisionedSlots); |
| 391 ASSERT(page->emptyCacheIndex == -1); | 368 DCHECK(page->emptyCacheIndex == -1); |
| 392 } | 369 } |
| 393 return ret; | 370 return ret; |
| 394 } | 371 } |
| 395 | 372 |
| 396 static void partitionIncreaseCommittedPages(PartitionRootBase* root, | 373 static void partitionIncreaseCommittedPages(PartitionRootBase* root, |
| 397 size_t len) { | 374 size_t len) { |
| 398 root->totalSizeOfCommittedPages += len; | 375 root->totalSizeOfCommittedPages += len; |
| 399 ASSERT(root->totalSizeOfCommittedPages <= | 376 DCHECK(root->totalSizeOfCommittedPages <= |
| 400 root->totalSizeOfSuperPages + root->totalSizeOfDirectMappedPages); | 377 root->totalSizeOfSuperPages + root->totalSizeOfDirectMappedPages); |
| 401 } | 378 } |
| 402 | 379 |
| 403 static void partitionDecreaseCommittedPages(PartitionRootBase* root, | 380 static void partitionDecreaseCommittedPages(PartitionRootBase* root, |
| 404 size_t len) { | 381 size_t len) { |
| 405 root->totalSizeOfCommittedPages -= len; | 382 root->totalSizeOfCommittedPages -= len; |
| 406 ASSERT(root->totalSizeOfCommittedPages <= | 383 DCHECK(root->totalSizeOfCommittedPages <= |
| 407 root->totalSizeOfSuperPages + root->totalSizeOfDirectMappedPages); | 384 root->totalSizeOfSuperPages + root->totalSizeOfDirectMappedPages); |
| 408 } | 385 } |
| 409 | 386 |
| 410 static ALWAYS_INLINE void partitionDecommitSystemPages(PartitionRootBase* root, | 387 static ALWAYS_INLINE void partitionDecommitSystemPages(PartitionRootBase* root, |
| 411 void* addr, | 388 void* addr, |
| 412 size_t len) { | 389 size_t len) { |
| 413 decommitSystemPages(addr, len); | 390 decommitSystemPages(addr, len); |
| 414 partitionDecreaseCommittedPages(root, len); | 391 partitionDecreaseCommittedPages(root, len); |
| 415 } | 392 } |
| 416 | 393 |
| 417 static ALWAYS_INLINE void partitionRecommitSystemPages(PartitionRootBase* root, | 394 static ALWAYS_INLINE void partitionRecommitSystemPages(PartitionRootBase* root, |
| 418 void* addr, | 395 void* addr, |
| 419 size_t len) { | 396 size_t len) { |
| 420 recommitSystemPages(addr, len); | 397 recommitSystemPages(addr, len); |
| 421 partitionIncreaseCommittedPages(root, len); | 398 partitionIncreaseCommittedPages(root, len); |
| 422 } | 399 } |
| 423 | 400 |
| 424 static ALWAYS_INLINE void* partitionAllocPartitionPages( | 401 static ALWAYS_INLINE void* partitionAllocPartitionPages( |
| 425 PartitionRootBase* root, | 402 PartitionRootBase* root, |
| 426 int flags, | 403 int flags, |
| 427 uint16_t numPartitionPages) { | 404 uint16_t numPartitionPages) { |
| 428 ASSERT(!(reinterpret_cast<uintptr_t>(root->nextPartitionPage) % | 405 DCHECK(!(reinterpret_cast<uintptr_t>(root->nextPartitionPage) % |
| 429 kPartitionPageSize)); | 406 kPartitionPageSize)); |
| 430 ASSERT(!(reinterpret_cast<uintptr_t>(root->nextPartitionPageEnd) % | 407 DCHECK(!(reinterpret_cast<uintptr_t>(root->nextPartitionPageEnd) % |
| 431 kPartitionPageSize)); | 408 kPartitionPageSize)); |
| 432 ASSERT(numPartitionPages <= kNumPartitionPagesPerSuperPage); | 409 DCHECK(numPartitionPages <= kNumPartitionPagesPerSuperPage); |
| 433 size_t totalSize = kPartitionPageSize * numPartitionPages; | 410 size_t totalSize = kPartitionPageSize * numPartitionPages; |
| 434 size_t numPartitionPagesLeft = | 411 size_t numPartitionPagesLeft = |
| 435 (root->nextPartitionPageEnd - root->nextPartitionPage) >> | 412 (root->nextPartitionPageEnd - root->nextPartitionPage) >> |
| 436 kPartitionPageShift; | 413 kPartitionPageShift; |
| 437 if (LIKELY(numPartitionPagesLeft >= numPartitionPages)) { | 414 if (LIKELY(numPartitionPagesLeft >= numPartitionPages)) { |
| 438 // In this case, we can still hand out pages from the current super page | 415 // In this case, we can still hand out pages from the current super page |
| 439 // allocation. | 416 // allocation. |
| 440 char* ret = root->nextPartitionPage; | 417 char* ret = root->nextPartitionPage; |
| 441 root->nextPartitionPage += totalSize; | 418 root->nextPartitionPage += totalSize; |
| 442 partitionIncreaseCommittedPages(root, totalSize); | 419 partitionIncreaseCommittedPages(root, totalSize); |
| (...skipping 49 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 492 // are unused, but we initialize them to 0 so that we get a clear signal | 469 // are unused, but we initialize them to 0 so that we get a clear signal |
| 493 // in case they are accidentally used. | 470 // in case they are accidentally used. |
| 494 latestExtent->superPageBase = 0; | 471 latestExtent->superPageBase = 0; |
| 495 latestExtent->superPagesEnd = 0; | 472 latestExtent->superPagesEnd = 0; |
| 496 latestExtent->next = 0; | 473 latestExtent->next = 0; |
| 497 | 474 |
| 498 PartitionSuperPageExtentEntry* currentExtent = root->currentExtent; | 475 PartitionSuperPageExtentEntry* currentExtent = root->currentExtent; |
| 499 bool isNewExtent = (superPage != requestedAddress); | 476 bool isNewExtent = (superPage != requestedAddress); |
| 500 if (UNLIKELY(isNewExtent)) { | 477 if (UNLIKELY(isNewExtent)) { |
| 501 if (UNLIKELY(!currentExtent)) { | 478 if (UNLIKELY(!currentExtent)) { |
| 502 ASSERT(!root->firstExtent); | 479 DCHECK(!root->firstExtent); |
| 503 root->firstExtent = latestExtent; | 480 root->firstExtent = latestExtent; |
| 504 } else { | 481 } else { |
| 505 ASSERT(currentExtent->superPageBase); | 482 DCHECK(currentExtent->superPageBase); |
| 506 currentExtent->next = latestExtent; | 483 currentExtent->next = latestExtent; |
| 507 } | 484 } |
| 508 root->currentExtent = latestExtent; | 485 root->currentExtent = latestExtent; |
| 509 latestExtent->superPageBase = superPage; | 486 latestExtent->superPageBase = superPage; |
| 510 latestExtent->superPagesEnd = superPage + kSuperPageSize; | 487 latestExtent->superPagesEnd = superPage + kSuperPageSize; |
| 511 } else { | 488 } else { |
| 512 // We allocated next to an existing extent so just nudge the size up a | 489 // We allocated next to an existing extent so just nudge the size up a |
| 513 // little. | 490 // little. |
| 514 ASSERT(currentExtent->superPagesEnd); | 491 DCHECK(currentExtent->superPagesEnd); |
| 515 currentExtent->superPagesEnd += kSuperPageSize; | 492 currentExtent->superPagesEnd += kSuperPageSize; |
| 516 ASSERT(ret >= currentExtent->superPageBase && | 493 DCHECK(ret >= currentExtent->superPageBase && |
| 517 ret < currentExtent->superPagesEnd); | 494 ret < currentExtent->superPagesEnd); |
| 518 } | 495 } |
| 519 return ret; | 496 return ret; |
| 520 } | 497 } |
| 521 | 498 |
| 522 static ALWAYS_INLINE uint16_t | 499 static ALWAYS_INLINE uint16_t |
| 523 partitionBucketPartitionPages(const PartitionBucket* bucket) { | 500 partitionBucketPartitionPages(const PartitionBucket* bucket) { |
| 524 return (bucket->numSystemPagesPerSlotSpan + | 501 return (bucket->numSystemPagesPerSlotSpan + |
| 525 (kNumSystemPagesPerPartitionPage - 1)) / | 502 (kNumSystemPagesPerPartitionPage - 1)) / |
| 526 kNumSystemPagesPerPartitionPage; | 503 kNumSystemPagesPerPartitionPage; |
| 527 } | 504 } |
| 528 | 505 |
| 529 static ALWAYS_INLINE void partitionPageReset(PartitionPage* page) { | 506 static ALWAYS_INLINE void partitionPageReset(PartitionPage* page) { |
| 530 ASSERT(partitionPageStateIsDecommitted(page)); | 507 DCHECK(partitionPageStateIsDecommitted(page)); |
| 531 | 508 |
| 532 page->numUnprovisionedSlots = partitionBucketSlots(page->bucket); | 509 page->numUnprovisionedSlots = partitionBucketSlots(page->bucket); |
| 533 ASSERT(page->numUnprovisionedSlots); | 510 DCHECK(page->numUnprovisionedSlots); |
| 534 | 511 |
| 535 page->nextPage = nullptr; | 512 page->nextPage = nullptr; |
| 536 } | 513 } |
| 537 | 514 |
| 538 static ALWAYS_INLINE void partitionPageSetup(PartitionPage* page, | 515 static ALWAYS_INLINE void partitionPageSetup(PartitionPage* page, |
| 539 PartitionBucket* bucket) { | 516 PartitionBucket* bucket) { |
| 540 // The bucket never changes. We set it up once. | 517 // The bucket never changes. We set it up once. |
| 541 page->bucket = bucket; | 518 page->bucket = bucket; |
| 542 page->emptyCacheIndex = -1; | 519 page->emptyCacheIndex = -1; |
| 543 | 520 |
| (...skipping 10 matching lines...) Expand all Loading... | |
| 554 for (uint16_t i = 1; i < numPartitionPages; ++i) { | 531 for (uint16_t i = 1; i < numPartitionPages; ++i) { |
| 555 pageCharPtr += kPageMetadataSize; | 532 pageCharPtr += kPageMetadataSize; |
| 556 PartitionPage* secondaryPage = | 533 PartitionPage* secondaryPage = |
| 557 reinterpret_cast<PartitionPage*>(pageCharPtr); | 534 reinterpret_cast<PartitionPage*>(pageCharPtr); |
| 558 secondaryPage->pageOffset = i; | 535 secondaryPage->pageOffset = i; |
| 559 } | 536 } |
| 560 } | 537 } |
| 561 | 538 |
| 562 static ALWAYS_INLINE char* partitionPageAllocAndFillFreelist( | 539 static ALWAYS_INLINE char* partitionPageAllocAndFillFreelist( |
| 563 PartitionPage* page) { | 540 PartitionPage* page) { |
| 564 ASSERT(page != &PartitionRootGeneric::gSeedPage); | 541 DCHECK(page != &PartitionRootGeneric::gSeedPage); |
| 565 uint16_t numSlots = page->numUnprovisionedSlots; | 542 uint16_t numSlots = page->numUnprovisionedSlots; |
| 566 ASSERT(numSlots); | 543 DCHECK(numSlots); |
| 567 PartitionBucket* bucket = page->bucket; | 544 PartitionBucket* bucket = page->bucket; |
| 568 // We should only get here when _every_ slot is either used or unprovisioned. | 545 // We should only get here when _every_ slot is either used or unprovisioned. |
| 569 // (The third state is "on the freelist". If we have a non-empty freelist, we | 546 // (The third state is "on the freelist". If we have a non-empty freelist, we |
| 570 // should not get here.) | 547 // should not get here.) |
| 571 ASSERT(numSlots + page->numAllocatedSlots == partitionBucketSlots(bucket)); | 548 DCHECK(numSlots + page->numAllocatedSlots == partitionBucketSlots(bucket)); |
| 572 // Similarly, make explicitly sure that the freelist is empty. | 549 // Similarly, make explicitly sure that the freelist is empty. |
| 573 ASSERT(!page->freelistHead); | 550 DCHECK(!page->freelistHead); |
| 574 ASSERT(page->numAllocatedSlots >= 0); | 551 DCHECK(page->numAllocatedSlots >= 0); |
| 575 | 552 |
| 576 size_t size = bucket->slotSize; | 553 size_t size = bucket->slotSize; |
| 577 char* base = reinterpret_cast<char*>(partitionPageToPointer(page)); | 554 char* base = reinterpret_cast<char*>(partitionPageToPointer(page)); |
| 578 char* returnObject = base + (size * page->numAllocatedSlots); | 555 char* returnObject = base + (size * page->numAllocatedSlots); |
| 579 char* firstFreelistPointer = returnObject + size; | 556 char* firstFreelistPointer = returnObject + size; |
| 580 char* firstFreelistPointerExtent = | 557 char* firstFreelistPointerExtent = |
| 581 firstFreelistPointer + sizeof(PartitionFreelistEntry*); | 558 firstFreelistPointer + sizeof(PartitionFreelistEntry*); |
| 582 // Our goal is to fault as few system pages as possible. We calculate the | 559 // Our goal is to fault as few system pages as possible. We calculate the |
| 583 // page containing the "end" of the returned slot, and then allow freelist | 560 // page containing the "end" of the returned slot, and then allow freelist |
| 584 // pointers to be written up to the end of that page. | 561 // pointers to be written up to the end of that page. |
| 585 char* subPageLimit = reinterpret_cast<char*>( | 562 char* subPageLimit = reinterpret_cast<char*>(base::roundUpToSystemPage( |
|
Primiano Tucci (use gerrit)
2016/11/22 14:28:32
cit: "drop the 'base::', just roundUpToSystemPage,
palmer
2016/11/24 01:05:56
Done.
| |
| 586 WTF::roundUpToSystemPage(reinterpret_cast<size_t>(firstFreelistPointer))); | 563 reinterpret_cast<size_t>(firstFreelistPointer))); |
| 587 char* slotsLimit = returnObject + (size * numSlots); | 564 char* slotsLimit = returnObject + (size * numSlots); |
| 588 char* freelistLimit = subPageLimit; | 565 char* freelistLimit = subPageLimit; |
| 589 if (UNLIKELY(slotsLimit < freelistLimit)) | 566 if (UNLIKELY(slotsLimit < freelistLimit)) |
| 590 freelistLimit = slotsLimit; | 567 freelistLimit = slotsLimit; |
| 591 | 568 |
| 592 uint16_t numNewFreelistEntries = 0; | 569 uint16_t numNewFreelistEntries = 0; |
| 593 if (LIKELY(firstFreelistPointerExtent <= freelistLimit)) { | 570 if (LIKELY(firstFreelistPointerExtent <= freelistLimit)) { |
| 594 // Only consider used space in the slot span. If we consider wasted | 571 // Only consider used space in the slot span. If we consider wasted |
| 595 // space, we may get an off-by-one when a freelist pointer fits in the | 572 // space, we may get an off-by-one when a freelist pointer fits in the |
| 596 // wasted space, but a slot does not. | 573 // wasted space, but a slot does not. |
| 597 // We know we can fit at least one freelist pointer. | 574 // We know we can fit at least one freelist pointer. |
| 598 numNewFreelistEntries = 1; | 575 numNewFreelistEntries = 1; |
| 599 // Any further entries require space for the whole slot span. | 576 // Any further entries require space for the whole slot span. |
| 600 numNewFreelistEntries += static_cast<uint16_t>( | 577 numNewFreelistEntries += static_cast<uint16_t>( |
| 601 (freelistLimit - firstFreelistPointerExtent) / size); | 578 (freelistLimit - firstFreelistPointerExtent) / size); |
| 602 } | 579 } |
| 603 | 580 |
| 604 // We always return an object slot -- that's the +1 below. | 581 // We always return an object slot -- that's the +1 below. |
| 605 // We do not necessarily create any new freelist entries, because we cross | 582 // We do not necessarily create any new freelist entries, because we cross |
| 606 // sub page boundaries frequently for large bucket sizes. | 583 // sub page boundaries frequently for large bucket sizes. |
| 607 ASSERT(numNewFreelistEntries + 1 <= numSlots); | 584 DCHECK(numNewFreelistEntries + 1 <= numSlots); |
| 608 numSlots -= (numNewFreelistEntries + 1); | 585 numSlots -= (numNewFreelistEntries + 1); |
| 609 page->numUnprovisionedSlots = numSlots; | 586 page->numUnprovisionedSlots = numSlots; |
| 610 page->numAllocatedSlots++; | 587 page->numAllocatedSlots++; |
| 611 | 588 |
| 612 if (LIKELY(numNewFreelistEntries)) { | 589 if (LIKELY(numNewFreelistEntries)) { |
| 613 char* freelistPointer = firstFreelistPointer; | 590 char* freelistPointer = firstFreelistPointer; |
| 614 PartitionFreelistEntry* entry = | 591 PartitionFreelistEntry* entry = |
| 615 reinterpret_cast<PartitionFreelistEntry*>(freelistPointer); | 592 reinterpret_cast<PartitionFreelistEntry*>(freelistPointer); |
| 616 page->freelistHead = entry; | 593 page->freelistHead = entry; |
| 617 while (--numNewFreelistEntries) { | 594 while (--numNewFreelistEntries) { |
| (...skipping 20 matching lines...) Expand all Loading... | |
| 638 // decommitted page list and full pages are unlinked from any list. | 615 // decommitted page list and full pages are unlinked from any list. |
| 639 static bool partitionSetNewActivePage(PartitionBucket* bucket) { | 616 static bool partitionSetNewActivePage(PartitionBucket* bucket) { |
| 640 PartitionPage* page = bucket->activePagesHead; | 617 PartitionPage* page = bucket->activePagesHead; |
| 641 if (page == &PartitionRootBase::gSeedPage) | 618 if (page == &PartitionRootBase::gSeedPage) |
| 642 return false; | 619 return false; |
| 643 | 620 |
| 644 PartitionPage* nextPage; | 621 PartitionPage* nextPage; |
| 645 | 622 |
| 646 for (; page; page = nextPage) { | 623 for (; page; page = nextPage) { |
| 647 nextPage = page->nextPage; | 624 nextPage = page->nextPage; |
| 648 ASSERT(page->bucket == bucket); | 625 DCHECK(page->bucket == bucket); |
| 649 ASSERT(page != bucket->emptyPagesHead); | 626 DCHECK(page != bucket->emptyPagesHead); |
| 650 ASSERT(page != bucket->decommittedPagesHead); | 627 DCHECK(page != bucket->decommittedPagesHead); |
| 651 | 628 |
| 652 // Deal with empty and decommitted pages. | 629 // Deal with empty and decommitted pages. |
| 653 if (LIKELY(partitionPageStateIsActive(page))) { | 630 if (LIKELY(partitionPageStateIsActive(page))) { |
| 654 // This page is usable because it has freelist entries, or has | 631 // This page is usable because it has freelist entries, or has |
| 655 // unprovisioned slots we can create freelist entries from. | 632 // unprovisioned slots we can create freelist entries from. |
| 656 bucket->activePagesHead = page; | 633 bucket->activePagesHead = page; |
| 657 return true; | 634 return true; |
| 658 } | 635 } |
| 659 if (LIKELY(partitionPageStateIsEmpty(page))) { | 636 if (LIKELY(partitionPageStateIsEmpty(page))) { |
| 660 page->nextPage = bucket->emptyPagesHead; | 637 page->nextPage = bucket->emptyPagesHead; |
| 661 bucket->emptyPagesHead = page; | 638 bucket->emptyPagesHead = page; |
| 662 } else if (LIKELY(partitionPageStateIsDecommitted(page))) { | 639 } else if (LIKELY(partitionPageStateIsDecommitted(page))) { |
| 663 page->nextPage = bucket->decommittedPagesHead; | 640 page->nextPage = bucket->decommittedPagesHead; |
| 664 bucket->decommittedPagesHead = page; | 641 bucket->decommittedPagesHead = page; |
| 665 } else { | 642 } else { |
| 666 ASSERT(partitionPageStateIsFull(page)); | 643 DCHECK(partitionPageStateIsFull(page)); |
| 667 // If we get here, we found a full page. Skip over it too, and also | 644 // If we get here, we found a full page. Skip over it too, and also |
| 668 // tag it as full (via a negative value). We need it tagged so that | 645 // tag it as full (via a negative value). We need it tagged so that |
| 669 // free'ing can tell, and move it back into the active page list. | 646 // free'ing can tell, and move it back into the active page list. |
| 670 page->numAllocatedSlots = -page->numAllocatedSlots; | 647 page->numAllocatedSlots = -page->numAllocatedSlots; |
| 671 ++bucket->numFullPages; | 648 ++bucket->numFullPages; |
| 672 // numFullPages is a uint16_t for efficient packing so guard against | 649 // numFullPages is a uint16_t for efficient packing so guard against |
| 673 // overflow to be safe. | 650 // overflow to be safe. |
| 674 if (UNLIKELY(!bucket->numFullPages)) | 651 if (UNLIKELY(!bucket->numFullPages)) |
| 675 partitionBucketFull(); | 652 partitionBucketFull(); |
| 676 // Not necessary but might help stop accidents. | 653 // Not necessary but might help stop accidents. |
| 677 page->nextPage = 0; | 654 page->nextPage = 0; |
| 678 } | 655 } |
| 679 } | 656 } |
| 680 | 657 |
| 681 bucket->activePagesHead = &PartitionRootGeneric::gSeedPage; | 658 bucket->activePagesHead = &PartitionRootGeneric::gSeedPage; |
| 682 return false; | 659 return false; |
| 683 } | 660 } |
| 684 | 661 |
| 685 static ALWAYS_INLINE PartitionDirectMapExtent* partitionPageToDirectMapExtent( | 662 static ALWAYS_INLINE PartitionDirectMapExtent* partitionPageToDirectMapExtent( |
| 686 PartitionPage* page) { | 663 PartitionPage* page) { |
| 687 ASSERT(partitionBucketIsDirectMapped(page->bucket)); | 664 DCHECK(partitionBucketIsDirectMapped(page->bucket)); |
| 688 return reinterpret_cast<PartitionDirectMapExtent*>( | 665 return reinterpret_cast<PartitionDirectMapExtent*>( |
| 689 reinterpret_cast<char*>(page) + 3 * kPageMetadataSize); | 666 reinterpret_cast<char*>(page) + 3 * kPageMetadataSize); |
| 690 } | 667 } |
| 691 | 668 |
| 692 static ALWAYS_INLINE void partitionPageSetRawSize(PartitionPage* page, | 669 static ALWAYS_INLINE void partitionPageSetRawSize(PartitionPage* page, |
| 693 size_t size) { | 670 size_t size) { |
| 694 size_t* rawSizePtr = partitionPageGetRawSizePtr(page); | 671 size_t* rawSizePtr = partitionPageGetRawSizePtr(page); |
| 695 if (UNLIKELY(rawSizePtr != nullptr)) | 672 if (UNLIKELY(rawSizePtr != nullptr)) |
| 696 *rawSizePtr = size; | 673 *rawSizePtr = size; |
| 697 } | 674 } |
| 698 | 675 |
| 699 static ALWAYS_INLINE PartitionPage* partitionDirectMap(PartitionRootBase* root, | 676 static ALWAYS_INLINE PartitionPage* partitionDirectMap(PartitionRootBase* root, |
| 700 int flags, | 677 int flags, |
| 701 size_t rawSize) { | 678 size_t rawSize) { |
| 702 size_t size = partitionDirectMapSize(rawSize); | 679 size_t size = partitionDirectMapSize(rawSize); |
| 703 | 680 |
| 704 // Because we need to fake looking like a super page, we need to allocate | 681 // Because we need to fake looking like a super page, we need to allocate |
| 705 // a bunch of system pages more than "size": | 682 // a bunch of system pages more than "size": |
| 706 // - The first few system pages are the partition page in which the super | 683 // - The first few system pages are the partition page in which the super |
| 707 // page metadata is stored. We fault just one system page out of a partition | 684 // page metadata is stored. We fault just one system page out of a partition |
| 708 // page sized clump. | 685 // page sized clump. |
| 709 // - We add a trailing guard page on 32-bit (on 64-bit we rely on the | 686 // - We add a trailing guard page on 32-bit (on 64-bit we rely on the |
| 710 // massive address space plus randomization instead). | 687 // massive address space plus randomization instead). |
| 711 size_t mapSize = size + kPartitionPageSize; | 688 size_t mapSize = size + kPartitionPageSize; |
| 712 #if !CPU(64BIT) | 689 #if !CPU(64BIT) |
|
Primiano Tucci (use gerrit)
2016/11/22 14:28:32
defined(ARCH_CPU_64_BITS)
| |
| 713 mapSize += kSystemPageSize; | 690 mapSize += kSystemPageSize; |
| 714 #endif | 691 #endif |
| 715 // Round up to the allocation granularity. | 692 // Round up to the allocation granularity. |
| 716 mapSize += kPageAllocationGranularityOffsetMask; | 693 mapSize += kPageAllocationGranularityOffsetMask; |
| 717 mapSize &= kPageAllocationGranularityBaseMask; | 694 mapSize &= kPageAllocationGranularityBaseMask; |
| 718 | 695 |
| 719 // TODO: these pages will be zero-filled. Consider internalizing an | 696 // TODO: these pages will be zero-filled. Consider internalizing an |
| 720 // allocZeroed() API so we can avoid a memset() entirely in this case. | 697 // allocZeroed() API so we can avoid a memset() entirely in this case. |
| 721 char* ptr = reinterpret_cast<char*>( | 698 char* ptr = reinterpret_cast<char*>( |
| 722 allocPages(0, mapSize, kSuperPageSize, PageAccessible)); | 699 allocPages(0, mapSize, kSuperPageSize, PageAccessible)); |
| 723 if (UNLIKELY(!ptr)) | 700 if (UNLIKELY(!ptr)) |
| 724 return nullptr; | 701 return nullptr; |
| 725 | 702 |
| 726 size_t committedPageSize = size + kSystemPageSize; | 703 size_t committedPageSize = size + kSystemPageSize; |
| 727 root->totalSizeOfDirectMappedPages += committedPageSize; | 704 root->totalSizeOfDirectMappedPages += committedPageSize; |
| 728 partitionIncreaseCommittedPages(root, committedPageSize); | 705 partitionIncreaseCommittedPages(root, committedPageSize); |
| 729 | 706 |
| 730 char* slot = ptr + kPartitionPageSize; | 707 char* slot = ptr + kPartitionPageSize; |
| 731 setSystemPagesInaccessible(ptr + (kSystemPageSize * 2), | 708 setSystemPagesInaccessible(ptr + (kSystemPageSize * 2), |
| 732 kPartitionPageSize - (kSystemPageSize * 2)); | 709 kPartitionPageSize - (kSystemPageSize * 2)); |
| 733 #if !CPU(64BIT) | 710 #if !CPU(64BIT) |
|
Primiano Tucci (use gerrit)
2016/11/22 14:28:33
defined(ARCH_CPU_64_BITS)
| |
| 734 setSystemPagesInaccessible(ptr, kSystemPageSize); | 711 setSystemPagesInaccessible(ptr, kSystemPageSize); |
| 735 setSystemPagesInaccessible(slot + size, kSystemPageSize); | 712 setSystemPagesInaccessible(slot + size, kSystemPageSize); |
| 736 #endif | 713 #endif |
| 737 | 714 |
| 738 PartitionSuperPageExtentEntry* extent = | 715 PartitionSuperPageExtentEntry* extent = |
| 739 reinterpret_cast<PartitionSuperPageExtentEntry*>( | 716 reinterpret_cast<PartitionSuperPageExtentEntry*>( |
| 740 partitionSuperPageToMetadataArea(ptr)); | 717 partitionSuperPageToMetadataArea(ptr)); |
| 741 extent->root = root; | 718 extent->root = root; |
| 742 // The new structures are all located inside a fresh system page so they | 719 // The new structures are all located inside a fresh system page so they |
| 743 // will all be zeroed out. These ASSERTs are for documentation. | 720 // will all be zeroed out. These DCHECKs are for documentation. |
| 744 ASSERT(!extent->superPageBase); | 721 DCHECK(!extent->superPageBase); |
| 745 ASSERT(!extent->superPagesEnd); | 722 DCHECK(!extent->superPagesEnd); |
| 746 ASSERT(!extent->next); | 723 DCHECK(!extent->next); |
| 747 PartitionPage* page = partitionPointerToPageNoAlignmentCheck(slot); | 724 PartitionPage* page = partitionPointerToPageNoAlignmentCheck(slot); |
| 748 PartitionBucket* bucket = reinterpret_cast<PartitionBucket*>( | 725 PartitionBucket* bucket = reinterpret_cast<PartitionBucket*>( |
| 749 reinterpret_cast<char*>(page) + (kPageMetadataSize * 2)); | 726 reinterpret_cast<char*>(page) + (kPageMetadataSize * 2)); |
| 750 ASSERT(!page->nextPage); | 727 DCHECK(!page->nextPage); |
| 751 ASSERT(!page->numAllocatedSlots); | 728 DCHECK(!page->numAllocatedSlots); |
| 752 ASSERT(!page->numUnprovisionedSlots); | 729 DCHECK(!page->numUnprovisionedSlots); |
| 753 ASSERT(!page->pageOffset); | 730 DCHECK(!page->pageOffset); |
| 754 ASSERT(!page->emptyCacheIndex); | 731 DCHECK(!page->emptyCacheIndex); |
| 755 page->bucket = bucket; | 732 page->bucket = bucket; |
| 756 page->freelistHead = reinterpret_cast<PartitionFreelistEntry*>(slot); | 733 page->freelistHead = reinterpret_cast<PartitionFreelistEntry*>(slot); |
| 757 PartitionFreelistEntry* nextEntry = | 734 PartitionFreelistEntry* nextEntry = |
| 758 reinterpret_cast<PartitionFreelistEntry*>(slot); | 735 reinterpret_cast<PartitionFreelistEntry*>(slot); |
| 759 nextEntry->next = partitionFreelistMask(0); | 736 nextEntry->next = partitionFreelistMask(0); |
| 760 | 737 |
| 761 ASSERT(!bucket->activePagesHead); | 738 DCHECK(!bucket->activePagesHead); |
| 762 ASSERT(!bucket->emptyPagesHead); | 739 DCHECK(!bucket->emptyPagesHead); |
| 763 ASSERT(!bucket->decommittedPagesHead); | 740 DCHECK(!bucket->decommittedPagesHead); |
| 764 ASSERT(!bucket->numSystemPagesPerSlotSpan); | 741 DCHECK(!bucket->numSystemPagesPerSlotSpan); |
| 765 ASSERT(!bucket->numFullPages); | 742 DCHECK(!bucket->numFullPages); |
| 766 bucket->slotSize = size; | 743 bucket->slotSize = size; |
| 767 | 744 |
| 768 PartitionDirectMapExtent* mapExtent = partitionPageToDirectMapExtent(page); | 745 PartitionDirectMapExtent* mapExtent = partitionPageToDirectMapExtent(page); |
| 769 mapExtent->mapSize = mapSize - kPartitionPageSize - kSystemPageSize; | 746 mapExtent->mapSize = mapSize - kPartitionPageSize - kSystemPageSize; |
| 770 mapExtent->bucket = bucket; | 747 mapExtent->bucket = bucket; |
| 771 | 748 |
| 772 // Maintain the doubly-linked list of all direct mappings. | 749 // Maintain the doubly-linked list of all direct mappings. |
| 773 mapExtent->nextExtent = root->directMapList; | 750 mapExtent->nextExtent = root->directMapList; |
| 774 if (mapExtent->nextExtent) | 751 if (mapExtent->nextExtent) |
| 775 mapExtent->nextExtent->prevExtent = mapExtent; | 752 mapExtent->nextExtent->prevExtent = mapExtent; |
| 776 mapExtent->prevExtent = nullptr; | 753 mapExtent->prevExtent = nullptr; |
| 777 root->directMapList = mapExtent; | 754 root->directMapList = mapExtent; |
| 778 | 755 |
| 779 return page; | 756 return page; |
| 780 } | 757 } |
| 781 | 758 |
| 782 static ALWAYS_INLINE void partitionDirectUnmap(PartitionPage* page) { | 759 static ALWAYS_INLINE void partitionDirectUnmap(PartitionPage* page) { |
| 783 PartitionRootBase* root = partitionPageToRoot(page); | 760 PartitionRootBase* root = partitionPageToRoot(page); |
| 784 const PartitionDirectMapExtent* extent = partitionPageToDirectMapExtent(page); | 761 const PartitionDirectMapExtent* extent = partitionPageToDirectMapExtent(page); |
| 785 size_t unmapSize = extent->mapSize; | 762 size_t unmapSize = extent->mapSize; |
| 786 | 763 |
| 787 // Maintain the doubly-linked list of all direct mappings. | 764 // Maintain the doubly-linked list of all direct mappings. |
| 788 if (extent->prevExtent) { | 765 if (extent->prevExtent) { |
| 789 ASSERT(extent->prevExtent->nextExtent == extent); | 766 DCHECK(extent->prevExtent->nextExtent == extent); |
| 790 extent->prevExtent->nextExtent = extent->nextExtent; | 767 extent->prevExtent->nextExtent = extent->nextExtent; |
| 791 } else { | 768 } else { |
| 792 root->directMapList = extent->nextExtent; | 769 root->directMapList = extent->nextExtent; |
| 793 } | 770 } |
| 794 if (extent->nextExtent) { | 771 if (extent->nextExtent) { |
| 795 ASSERT(extent->nextExtent->prevExtent == extent); | 772 DCHECK(extent->nextExtent->prevExtent == extent); |
| 796 extent->nextExtent->prevExtent = extent->prevExtent; | 773 extent->nextExtent->prevExtent = extent->prevExtent; |
| 797 } | 774 } |
| 798 | 775 |
| 799 // Add on the size of the trailing guard page and preceding partition | 776 // Add on the size of the trailing guard page and preceding partition |
| 800 // page. | 777 // page. |
| 801 unmapSize += kPartitionPageSize + kSystemPageSize; | 778 unmapSize += kPartitionPageSize + kSystemPageSize; |
| 802 | 779 |
| 803 size_t uncommittedPageSize = page->bucket->slotSize + kSystemPageSize; | 780 size_t uncommittedPageSize = page->bucket->slotSize + kSystemPageSize; |
| 804 partitionDecreaseCommittedPages(root, uncommittedPageSize); | 781 partitionDecreaseCommittedPages(root, uncommittedPageSize); |
| 805 ASSERT(root->totalSizeOfDirectMappedPages >= uncommittedPageSize); | 782 DCHECK(root->totalSizeOfDirectMappedPages >= uncommittedPageSize); |
| 806 root->totalSizeOfDirectMappedPages -= uncommittedPageSize; | 783 root->totalSizeOfDirectMappedPages -= uncommittedPageSize; |
| 807 | 784 |
| 808 ASSERT(!(unmapSize & kPageAllocationGranularityOffsetMask)); | 785 DCHECK(!(unmapSize & kPageAllocationGranularityOffsetMask)); |
| 809 | 786 |
| 810 char* ptr = reinterpret_cast<char*>(partitionPageToPointer(page)); | 787 char* ptr = reinterpret_cast<char*>(partitionPageToPointer(page)); |
| 811 // Account for the mapping starting a partition page before the actual | 788 // Account for the mapping starting a partition page before the actual |
| 812 // allocation address. | 789 // allocation address. |
| 813 ptr -= kPartitionPageSize; | 790 ptr -= kPartitionPageSize; |
| 814 | 791 |
| 815 freePages(ptr, unmapSize); | 792 freePages(ptr, unmapSize); |
| 816 } | 793 } |
| 817 | 794 |
| 818 void* partitionAllocSlowPath(PartitionRootBase* root, | 795 void* partitionAllocSlowPath(PartitionRootBase* root, |
| 819 int flags, | 796 int flags, |
| 820 size_t size, | 797 size_t size, |
| 821 PartitionBucket* bucket) { | 798 PartitionBucket* bucket) { |
| 822 // The slow path is called when the freelist is empty. | 799 // The slow path is called when the freelist is empty. |
| 823 ASSERT(!bucket->activePagesHead->freelistHead); | 800 DCHECK(!bucket->activePagesHead->freelistHead); |
| 824 | 801 |
| 825 PartitionPage* newPage = nullptr; | 802 PartitionPage* newPage = nullptr; |
| 826 | 803 |
| 827 // For the partitionAllocGeneric API, we have a bunch of buckets marked | 804 // For the partitionAllocGeneric API, we have a bunch of buckets marked |
| 828 // as special cases. We bounce them through to the slow path so that we | 805 // as special cases. We bounce them through to the slow path so that we |
| 829 // can still have a blazing fast hot path due to lack of corner-case | 806 // can still have a blazing fast hot path due to lack of corner-case |
| 830 // branches. | 807 // branches. |
| 831 bool returnNull = flags & PartitionAllocReturnNull; | 808 bool returnNull = flags & PartitionAllocReturnNull; |
| 832 if (UNLIKELY(partitionBucketIsDirectMapped(bucket))) { | 809 if (UNLIKELY(partitionBucketIsDirectMapped(bucket))) { |
| 833 ASSERT(size > kGenericMaxBucketed); | 810 DCHECK(size > kGenericMaxBucketed); |
| 834 ASSERT(bucket == &PartitionRootBase::gPagedBucket); | 811 DCHECK(bucket == &PartitionRootBase::gPagedBucket); |
| 835 ASSERT(bucket->activePagesHead == &PartitionRootGeneric::gSeedPage); | 812 DCHECK(bucket->activePagesHead == &PartitionRootGeneric::gSeedPage); |
| 836 if (size > kGenericMaxDirectMapped) { | 813 if (size > kGenericMaxDirectMapped) { |
| 837 if (returnNull) | 814 if (returnNull) |
| 838 return nullptr; | 815 return nullptr; |
| 839 partitionExcessiveAllocationSize(); | 816 partitionExcessiveAllocationSize(); |
| 840 } | 817 } |
| 841 newPage = partitionDirectMap(root, flags, size); | 818 newPage = partitionDirectMap(root, flags, size); |
| 842 } else if (LIKELY(partitionSetNewActivePage(bucket))) { | 819 } else if (LIKELY(partitionSetNewActivePage(bucket))) { |
| 843 // First, did we find an active page in the active pages list? | 820 // First, did we find an active page in the active pages list? |
| 844 newPage = bucket->activePagesHead; | 821 newPage = bucket->activePagesHead; |
| 845 ASSERT(partitionPageStateIsActive(newPage)); | 822 DCHECK(partitionPageStateIsActive(newPage)); |
| 846 } else if (LIKELY(bucket->emptyPagesHead != nullptr) || | 823 } else if (LIKELY(bucket->emptyPagesHead != nullptr) || |
| 847 LIKELY(bucket->decommittedPagesHead != nullptr)) { | 824 LIKELY(bucket->decommittedPagesHead != nullptr)) { |
| 848 // Second, look in our lists of empty and decommitted pages. | 825 // Second, look in our lists of empty and decommitted pages. |
| 849 // Check empty pages first, which are preferred, but beware that an | 826 // Check empty pages first, which are preferred, but beware that an |
| 850 // empty page might have been decommitted. | 827 // empty page might have been decommitted. |
| 851 while (LIKELY((newPage = bucket->emptyPagesHead) != nullptr)) { | 828 while (LIKELY((newPage = bucket->emptyPagesHead) != nullptr)) { |
| 852 ASSERT(newPage->bucket == bucket); | 829 DCHECK(newPage->bucket == bucket); |
| 853 ASSERT(partitionPageStateIsEmpty(newPage) || | 830 DCHECK(partitionPageStateIsEmpty(newPage) || |
| 854 partitionPageStateIsDecommitted(newPage)); | 831 partitionPageStateIsDecommitted(newPage)); |
| 855 bucket->emptyPagesHead = newPage->nextPage; | 832 bucket->emptyPagesHead = newPage->nextPage; |
| 856 // Accept the empty page unless it got decommitted. | 833 // Accept the empty page unless it got decommitted. |
| 857 if (newPage->freelistHead) { | 834 if (newPage->freelistHead) { |
| 858 newPage->nextPage = nullptr; | 835 newPage->nextPage = nullptr; |
| 859 break; | 836 break; |
| 860 } | 837 } |
| 861 ASSERT(partitionPageStateIsDecommitted(newPage)); | 838 DCHECK(partitionPageStateIsDecommitted(newPage)); |
| 862 newPage->nextPage = bucket->decommittedPagesHead; | 839 newPage->nextPage = bucket->decommittedPagesHead; |
| 863 bucket->decommittedPagesHead = newPage; | 840 bucket->decommittedPagesHead = newPage; |
| 864 } | 841 } |
| 865 if (UNLIKELY(!newPage) && LIKELY(bucket->decommittedPagesHead != nullptr)) { | 842 if (UNLIKELY(!newPage) && LIKELY(bucket->decommittedPagesHead != nullptr)) { |
| 866 newPage = bucket->decommittedPagesHead; | 843 newPage = bucket->decommittedPagesHead; |
| 867 ASSERT(newPage->bucket == bucket); | 844 DCHECK(newPage->bucket == bucket); |
| 868 ASSERT(partitionPageStateIsDecommitted(newPage)); | 845 DCHECK(partitionPageStateIsDecommitted(newPage)); |
| 869 bucket->decommittedPagesHead = newPage->nextPage; | 846 bucket->decommittedPagesHead = newPage->nextPage; |
| 870 void* addr = partitionPageToPointer(newPage); | 847 void* addr = partitionPageToPointer(newPage); |
| 871 partitionRecommitSystemPages(root, addr, | 848 partitionRecommitSystemPages(root, addr, |
| 872 partitionBucketBytes(newPage->bucket)); | 849 partitionBucketBytes(newPage->bucket)); |
| 873 partitionPageReset(newPage); | 850 partitionPageReset(newPage); |
| 874 } | 851 } |
| 875 ASSERT(newPage); | 852 DCHECK(newPage); |
| 876 } else { | 853 } else { |
| 877 // Third. If we get here, we need a brand new page. | 854 // Third. If we get here, we need a brand new page. |
| 878 uint16_t numPartitionPages = partitionBucketPartitionPages(bucket); | 855 uint16_t numPartitionPages = partitionBucketPartitionPages(bucket); |
| 879 void* rawPages = | 856 void* rawPages = |
| 880 partitionAllocPartitionPages(root, flags, numPartitionPages); | 857 partitionAllocPartitionPages(root, flags, numPartitionPages); |
| 881 if (LIKELY(rawPages != nullptr)) { | 858 if (LIKELY(rawPages != nullptr)) { |
| 882 newPage = partitionPointerToPageNoAlignmentCheck(rawPages); | 859 newPage = partitionPointerToPageNoAlignmentCheck(rawPages); |
| 883 partitionPageSetup(newPage, bucket); | 860 partitionPageSetup(newPage, bucket); |
| 884 } | 861 } |
| 885 } | 862 } |
| 886 | 863 |
| 887 // Bail if we had a memory allocation failure. | 864 // Bail if we had a memory allocation failure. |
| 888 if (UNLIKELY(!newPage)) { | 865 if (UNLIKELY(!newPage)) { |
| 889 ASSERT(bucket->activePagesHead == &PartitionRootGeneric::gSeedPage); | 866 DCHECK(bucket->activePagesHead == &PartitionRootGeneric::gSeedPage); |
| 890 if (returnNull) | 867 if (returnNull) |
| 891 return nullptr; | 868 return nullptr; |
| 892 partitionOutOfMemory(root); | 869 partitionOutOfMemory(root); |
| 893 } | 870 } |
| 894 | 871 |
| 895 bucket = newPage->bucket; | 872 bucket = newPage->bucket; |
| 896 ASSERT(bucket != &PartitionRootBase::gPagedBucket); | 873 DCHECK(bucket != &PartitionRootBase::gPagedBucket); |
| 897 bucket->activePagesHead = newPage; | 874 bucket->activePagesHead = newPage; |
| 898 partitionPageSetRawSize(newPage, size); | 875 partitionPageSetRawSize(newPage, size); |
| 899 | 876 |
| 900 // If we found an active page with free slots, or an empty page, we have a | 877 // If we found an active page with free slots, or an empty page, we have a |
| 901 // usable freelist head. | 878 // usable freelist head. |
| 902 if (LIKELY(newPage->freelistHead != nullptr)) { | 879 if (LIKELY(newPage->freelistHead != nullptr)) { |
| 903 PartitionFreelistEntry* entry = newPage->freelistHead; | 880 PartitionFreelistEntry* entry = newPage->freelistHead; |
| 904 PartitionFreelistEntry* newHead = partitionFreelistMask(entry->next); | 881 PartitionFreelistEntry* newHead = partitionFreelistMask(entry->next); |
| 905 newPage->freelistHead = newHead; | 882 newPage->freelistHead = newHead; |
| 906 newPage->numAllocatedSlots++; | 883 newPage->numAllocatedSlots++; |
| 907 return entry; | 884 return entry; |
| 908 } | 885 } |
| 909 // Otherwise, we need to build the freelist. | 886 // Otherwise, we need to build the freelist. |
| 910 ASSERT(newPage->numUnprovisionedSlots); | 887 DCHECK(newPage->numUnprovisionedSlots); |
| 911 return partitionPageAllocAndFillFreelist(newPage); | 888 return partitionPageAllocAndFillFreelist(newPage); |
| 912 } | 889 } |
| 913 | 890 |
| 914 static ALWAYS_INLINE void partitionDecommitPage(PartitionRootBase* root, | 891 static ALWAYS_INLINE void partitionDecommitPage(PartitionRootBase* root, |
| 915 PartitionPage* page) { | 892 PartitionPage* page) { |
| 916 ASSERT(partitionPageStateIsEmpty(page)); | 893 DCHECK(partitionPageStateIsEmpty(page)); |
| 917 ASSERT(!partitionBucketIsDirectMapped(page->bucket)); | 894 DCHECK(!partitionBucketIsDirectMapped(page->bucket)); |
| 918 void* addr = partitionPageToPointer(page); | 895 void* addr = partitionPageToPointer(page); |
| 919 partitionDecommitSystemPages(root, addr, partitionBucketBytes(page->bucket)); | 896 partitionDecommitSystemPages(root, addr, partitionBucketBytes(page->bucket)); |
| 920 | 897 |
| 921 // We actually leave the decommitted page in the active list. We'll sweep | 898 // We actually leave the decommitted page in the active list. We'll sweep |
| 922 // it on to the decommitted page list when we next walk the active page | 899 // it on to the decommitted page list when we next walk the active page |
| 923 // list. | 900 // list. |
| 924 // Pulling this trick enables us to use a singly-linked page list for all | 901 // Pulling this trick enables us to use a singly-linked page list for all |
| 925 // cases, which is critical in keeping the page metadata structure down to | 902 // cases, which is critical in keeping the page metadata structure down to |
| 926 // 32 bytes in size. | 903 // 32 bytes in size. |
| 927 page->freelistHead = 0; | 904 page->freelistHead = 0; |
| 928 page->numUnprovisionedSlots = 0; | 905 page->numUnprovisionedSlots = 0; |
| 929 ASSERT(partitionPageStateIsDecommitted(page)); | 906 DCHECK(partitionPageStateIsDecommitted(page)); |
| 930 } | 907 } |
| 931 | 908 |
| 932 static void partitionDecommitPageIfPossible(PartitionRootBase* root, | 909 static void partitionDecommitPageIfPossible(PartitionRootBase* root, |
| 933 PartitionPage* page) { | 910 PartitionPage* page) { |
| 934 ASSERT(page->emptyCacheIndex >= 0); | 911 DCHECK(page->emptyCacheIndex >= 0); |
| 935 ASSERT(static_cast<unsigned>(page->emptyCacheIndex) < kMaxFreeableSpans); | 912 DCHECK(static_cast<unsigned>(page->emptyCacheIndex) < kMaxFreeableSpans); |
| 936 ASSERT(page == root->globalEmptyPageRing[page->emptyCacheIndex]); | 913 DCHECK(page == root->globalEmptyPageRing[page->emptyCacheIndex]); |
| 937 page->emptyCacheIndex = -1; | 914 page->emptyCacheIndex = -1; |
| 938 if (partitionPageStateIsEmpty(page)) | 915 if (partitionPageStateIsEmpty(page)) |
| 939 partitionDecommitPage(root, page); | 916 partitionDecommitPage(root, page); |
| 940 } | 917 } |
| 941 | 918 |
| 942 static ALWAYS_INLINE void partitionRegisterEmptyPage(PartitionPage* page) { | 919 static ALWAYS_INLINE void partitionRegisterEmptyPage(PartitionPage* page) { |
| 943 ASSERT(partitionPageStateIsEmpty(page)); | 920 DCHECK(partitionPageStateIsEmpty(page)); |
| 944 PartitionRootBase* root = partitionPageToRoot(page); | 921 PartitionRootBase* root = partitionPageToRoot(page); |
| 945 | 922 |
| 946 // If the page is already registered as empty, give it another life. | 923 // If the page is already registered as empty, give it another life. |
| 947 if (page->emptyCacheIndex != -1) { | 924 if (page->emptyCacheIndex != -1) { |
| 948 ASSERT(page->emptyCacheIndex >= 0); | 925 DCHECK(page->emptyCacheIndex >= 0); |
| 949 ASSERT(static_cast<unsigned>(page->emptyCacheIndex) < kMaxFreeableSpans); | 926 DCHECK(static_cast<unsigned>(page->emptyCacheIndex) < kMaxFreeableSpans); |
| 950 ASSERT(root->globalEmptyPageRing[page->emptyCacheIndex] == page); | 927 DCHECK(root->globalEmptyPageRing[page->emptyCacheIndex] == page); |
| 951 root->globalEmptyPageRing[page->emptyCacheIndex] = 0; | 928 root->globalEmptyPageRing[page->emptyCacheIndex] = 0; |
| 952 } | 929 } |
| 953 | 930 |
| 954 int16_t currentIndex = root->globalEmptyPageRingIndex; | 931 int16_t currentIndex = root->globalEmptyPageRingIndex; |
| 955 PartitionPage* pageToDecommit = root->globalEmptyPageRing[currentIndex]; | 932 PartitionPage* pageToDecommit = root->globalEmptyPageRing[currentIndex]; |
| 956 // The page might well have been re-activated, filled up, etc. before we get | 933 // The page might well have been re-activated, filled up, etc. before we get |
| 957 // around to looking at it here. | 934 // around to looking at it here. |
| 958 if (pageToDecommit) | 935 if (pageToDecommit) |
| 959 partitionDecommitPageIfPossible(root, pageToDecommit); | 936 partitionDecommitPageIfPossible(root, pageToDecommit); |
| 960 | 937 |
| (...skipping 13 matching lines...) Expand all Loading... | |
| 974 for (size_t i = 0; i < kMaxFreeableSpans; ++i) { | 951 for (size_t i = 0; i < kMaxFreeableSpans; ++i) { |
| 975 PartitionPage* page = root->globalEmptyPageRing[i]; | 952 PartitionPage* page = root->globalEmptyPageRing[i]; |
| 976 if (page) | 953 if (page) |
| 977 partitionDecommitPageIfPossible(root, page); | 954 partitionDecommitPageIfPossible(root, page); |
| 978 root->globalEmptyPageRing[i] = nullptr; | 955 root->globalEmptyPageRing[i] = nullptr; |
| 979 } | 956 } |
| 980 } | 957 } |
| 981 | 958 |
| 982 void partitionFreeSlowPath(PartitionPage* page) { | 959 void partitionFreeSlowPath(PartitionPage* page) { |
| 983 PartitionBucket* bucket = page->bucket; | 960 PartitionBucket* bucket = page->bucket; |
| 984 ASSERT(page != &PartitionRootGeneric::gSeedPage); | 961 DCHECK(page != &PartitionRootGeneric::gSeedPage); |
| 985 if (LIKELY(page->numAllocatedSlots == 0)) { | 962 if (LIKELY(page->numAllocatedSlots == 0)) { |
| 986 // Page became fully unused. | 963 // Page became fully unused. |
| 987 if (UNLIKELY(partitionBucketIsDirectMapped(bucket))) { | 964 if (UNLIKELY(partitionBucketIsDirectMapped(bucket))) { |
| 988 partitionDirectUnmap(page); | 965 partitionDirectUnmap(page); |
| 989 return; | 966 return; |
| 990 } | 967 } |
| 991 // If it's the current active page, change it. We bounce the page to | 968 // If it's the current active page, change it. We bounce the page to |
| 992 // the empty list as a force towards defragmentation. | 969 // the empty list as a force towards defragmentation. |
| 993 if (LIKELY(page == bucket->activePagesHead)) | 970 if (LIKELY(page == bucket->activePagesHead)) |
| 994 (void)partitionSetNewActivePage(bucket); | 971 (void)partitionSetNewActivePage(bucket); |
| 995 ASSERT(bucket->activePagesHead != page); | 972 DCHECK(bucket->activePagesHead != page); |
| 996 | 973 |
| 997 partitionPageSetRawSize(page, 0); | 974 partitionPageSetRawSize(page, 0); |
| 998 ASSERT(!partitionPageGetRawSize(page)); | 975 DCHECK(!partitionPageGetRawSize(page)); |
| 999 | 976 |
| 1000 partitionRegisterEmptyPage(page); | 977 partitionRegisterEmptyPage(page); |
| 1001 } else { | 978 } else { |
| 1002 ASSERT(!partitionBucketIsDirectMapped(bucket)); | 979 DCHECK(!partitionBucketIsDirectMapped(bucket)); |
| 1003 // Ensure that the page is full. That's the only valid case if we | 980 // Ensure that the page is full. That's the only valid case if we |
| 1004 // arrive here. | 981 // arrive here. |
| 1005 ASSERT(page->numAllocatedSlots < 0); | 982 DCHECK(page->numAllocatedSlots < 0); |
| 1006 // A transition of numAllocatedSlots from 0 to -1 is not legal, and | 983 // A transition of numAllocatedSlots from 0 to -1 is not legal, and |
| 1007 // likely indicates a double-free. | 984 // likely indicates a double-free. |
| 1008 SECURITY_CHECK(page->numAllocatedSlots != -1); | 985 SECURITY_CHECK(page->numAllocatedSlots != -1); |
| 1009 page->numAllocatedSlots = -page->numAllocatedSlots - 2; | 986 page->numAllocatedSlots = -page->numAllocatedSlots - 2; |
| 1010 ASSERT(page->numAllocatedSlots == partitionBucketSlots(bucket) - 1); | 987 DCHECK(page->numAllocatedSlots == partitionBucketSlots(bucket) - 1); |
| 1011 // Fully used page became partially used. It must be put back on the | 988 // Fully used page became partially used. It must be put back on the |
| 1012 // non-full page list. Also make it the current page to increase the | 989 // non-full page list. Also make it the current page to increase the |
| 1013 // chances of it being filled up again. The old current page will be | 990 // chances of it being filled up again. The old current page will be |
| 1014 // the next page. | 991 // the next page. |
| 1015 ASSERT(!page->nextPage); | 992 DCHECK(!page->nextPage); |
| 1016 if (LIKELY(bucket->activePagesHead != &PartitionRootGeneric::gSeedPage)) | 993 if (LIKELY(bucket->activePagesHead != &PartitionRootGeneric::gSeedPage)) |
| 1017 page->nextPage = bucket->activePagesHead; | 994 page->nextPage = bucket->activePagesHead; |
| 1018 bucket->activePagesHead = page; | 995 bucket->activePagesHead = page; |
| 1019 --bucket->numFullPages; | 996 --bucket->numFullPages; |
| 1020 // Special case: for a partition page with just a single slot, it may | 997 // Special case: for a partition page with just a single slot, it may |
| 1021 // now be empty and we want to run it through the empty logic. | 998 // now be empty and we want to run it through the empty logic. |
| 1022 if (UNLIKELY(page->numAllocatedSlots == 0)) | 999 if (UNLIKELY(page->numAllocatedSlots == 0)) |
| 1023 partitionFreeSlowPath(page); | 1000 partitionFreeSlowPath(page); |
| 1024 } | 1001 } |
| 1025 } | 1002 } |
| 1026 | 1003 |
| 1027 bool partitionReallocDirectMappedInPlace(PartitionRootGeneric* root, | 1004 bool partitionReallocDirectMappedInPlace(PartitionRootGeneric* root, |
| 1028 PartitionPage* page, | 1005 PartitionPage* page, |
| 1029 size_t rawSize) { | 1006 size_t rawSize) { |
| 1030 ASSERT(partitionBucketIsDirectMapped(page->bucket)); | 1007 DCHECK(partitionBucketIsDirectMapped(page->bucket)); |
| 1031 | 1008 |
| 1032 rawSize = partitionCookieSizeAdjustAdd(rawSize); | 1009 rawSize = partitionCookieSizeAdjustAdd(rawSize); |
| 1033 | 1010 |
| 1034 // Note that the new size might be a bucketed size; this function is called | 1011 // Note that the new size might be a bucketed size; this function is called |
| 1035 // whenever we're reallocating a direct mapped allocation. | 1012 // whenever we're reallocating a direct mapped allocation. |
| 1036 size_t newSize = partitionDirectMapSize(rawSize); | 1013 size_t newSize = partitionDirectMapSize(rawSize); |
| 1037 if (newSize < kGenericMinDirectMappedDownsize) | 1014 if (newSize < kGenericMinDirectMappedDownsize) |
| 1038 return false; | 1015 return false; |
| 1039 | 1016 |
| 1040 // bucket->slotSize is the current size of the allocation. | 1017 // bucket->slotSize is the current size of the allocation. |
| (...skipping 13 matching lines...) Expand all Loading... | |
| 1054 | 1031 |
| 1055 // Shrink by decommitting unneeded pages and making them inaccessible. | 1032 // Shrink by decommitting unneeded pages and making them inaccessible. |
| 1056 size_t decommitSize = currentSize - newSize; | 1033 size_t decommitSize = currentSize - newSize; |
| 1057 partitionDecommitSystemPages(root, charPtr + newSize, decommitSize); | 1034 partitionDecommitSystemPages(root, charPtr + newSize, decommitSize); |
| 1058 setSystemPagesInaccessible(charPtr + newSize, decommitSize); | 1035 setSystemPagesInaccessible(charPtr + newSize, decommitSize); |
| 1059 } else if (newSize <= partitionPageToDirectMapExtent(page)->mapSize) { | 1036 } else if (newSize <= partitionPageToDirectMapExtent(page)->mapSize) { |
| 1060 // Grow within the actually allocated memory. Just need to make the | 1037 // Grow within the actually allocated memory. Just need to make the |
| 1061 // pages accessible again. | 1038 // pages accessible again. |
| 1062 size_t recommitSize = newSize - currentSize; | 1039 size_t recommitSize = newSize - currentSize; |
| 1063 bool ret = setSystemPagesAccessible(charPtr + currentSize, recommitSize); | 1040 bool ret = setSystemPagesAccessible(charPtr + currentSize, recommitSize); |
| 1064 RELEASE_ASSERT(ret); | 1041 CHECK(ret); |
| 1065 partitionRecommitSystemPages(root, charPtr + currentSize, recommitSize); | 1042 partitionRecommitSystemPages(root, charPtr + currentSize, recommitSize); |
| 1066 | 1043 |
| 1067 #if ENABLE(ASSERT) | 1044 #if DCHECK_IS_ON() |
| 1068 memset(charPtr + currentSize, kUninitializedByte, recommitSize); | 1045 memset(charPtr + currentSize, kUninitializedByte, recommitSize); |
| 1069 #endif | 1046 #endif |
| 1070 } else { | 1047 } else { |
| 1071 // We can't perform the realloc in-place. | 1048 // We can't perform the realloc in-place. |
| 1072 // TODO: support this too when possible. | 1049 // TODO: support this too when possible. |
| 1073 return false; | 1050 return false; |
| 1074 } | 1051 } |
| 1075 | 1052 |
| 1076 #if ENABLE(ASSERT) | 1053 #if DCHECK_IS_ON() |
| 1077 // Write a new trailing cookie. | 1054 // Write a new trailing cookie. |
| 1078 partitionCookieWriteValue(charPtr + rawSize - kCookieSize); | 1055 partitionCookieWriteValue(charPtr + rawSize - kCookieSize); |
| 1079 #endif | 1056 #endif |
| 1080 | 1057 |
| 1081 partitionPageSetRawSize(page, rawSize); | 1058 partitionPageSetRawSize(page, rawSize); |
| 1082 ASSERT(partitionPageGetRawSize(page) == rawSize); | 1059 DCHECK(partitionPageGetRawSize(page) == rawSize); |
| 1083 | 1060 |
| 1084 page->bucket->slotSize = newSize; | 1061 page->bucket->slotSize = newSize; |
| 1085 return true; | 1062 return true; |
| 1086 } | 1063 } |
| 1087 | 1064 |
| 1088 void* partitionReallocGeneric(PartitionRootGeneric* root, | 1065 void* partitionReallocGeneric(PartitionRootGeneric* root, |
| 1089 void* ptr, | 1066 void* ptr, |
| 1090 size_t newSize, | 1067 size_t newSize, |
| 1091 const char* typeName) { | 1068 const char* typeName) { |
| 1092 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) | 1069 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) |
| 1093 return realloc(ptr, newSize); | 1070 return realloc(ptr, newSize); |
| 1094 #else | 1071 #else |
| 1095 if (UNLIKELY(!ptr)) | 1072 if (UNLIKELY(!ptr)) |
| 1096 return partitionAllocGeneric(root, newSize, typeName); | 1073 return partitionAllocGeneric(root, newSize, typeName); |
| 1097 if (UNLIKELY(!newSize)) { | 1074 if (UNLIKELY(!newSize)) { |
| 1098 partitionFreeGeneric(root, ptr); | 1075 partitionFreeGeneric(root, ptr); |
| 1099 return 0; | 1076 return 0; |
| 1100 } | 1077 } |
| 1101 | 1078 |
| 1102 if (newSize > kGenericMaxDirectMapped) | 1079 if (newSize > kGenericMaxDirectMapped) |
| 1103 partitionExcessiveAllocationSize(); | 1080 partitionExcessiveAllocationSize(); |
| 1104 | 1081 |
| 1105 ASSERT(partitionPointerIsValid(partitionCookieFreePointerAdjust(ptr))); | 1082 DCHECK(partitionPointerIsValid(partitionCookieFreePointerAdjust(ptr))); |
| 1106 | 1083 |
| 1107 PartitionPage* page = | 1084 PartitionPage* page = |
| 1108 partitionPointerToPage(partitionCookieFreePointerAdjust(ptr)); | 1085 partitionPointerToPage(partitionCookieFreePointerAdjust(ptr)); |
| 1109 | 1086 |
| 1110 if (UNLIKELY(partitionBucketIsDirectMapped(page->bucket))) { | 1087 if (UNLIKELY(partitionBucketIsDirectMapped(page->bucket))) { |
| 1111 // We may be able to perform the realloc in place by changing the | 1088 // We may be able to perform the realloc in place by changing the |
| 1112 // accessibility of memory pages and, if reducing the size, decommitting | 1089 // accessibility of memory pages and, if reducing the size, decommitting |
| 1113 // them. | 1090 // them. |
| 1114 if (partitionReallocDirectMappedInPlace(root, page, newSize)) { | 1091 if (partitionReallocDirectMappedInPlace(root, page, newSize)) { |
| 1115 PartitionAllocHooks::reallocHookIfEnabled(ptr, ptr, newSize, typeName); | 1092 PartitionAllocHooks::reallocHookIfEnabled(ptr, ptr, newSize, typeName); |
| (...skipping 31 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 1147 size_t slotSize = bucket->slotSize; | 1124 size_t slotSize = bucket->slotSize; |
| 1148 if (slotSize < kSystemPageSize || !page->numAllocatedSlots) | 1125 if (slotSize < kSystemPageSize || !page->numAllocatedSlots) |
| 1149 return 0; | 1126 return 0; |
| 1150 | 1127 |
| 1151 size_t bucketNumSlots = partitionBucketSlots(bucket); | 1128 size_t bucketNumSlots = partitionBucketSlots(bucket); |
| 1152 size_t discardableBytes = 0; | 1129 size_t discardableBytes = 0; |
| 1153 | 1130 |
| 1154 size_t rawSize = partitionPageGetRawSize(const_cast<PartitionPage*>(page)); | 1131 size_t rawSize = partitionPageGetRawSize(const_cast<PartitionPage*>(page)); |
| 1155 if (rawSize) { | 1132 if (rawSize) { |
| 1156 uint32_t usedBytes = | 1133 uint32_t usedBytes = |
| 1157 static_cast<uint32_t>(WTF::roundUpToSystemPage(rawSize)); | 1134 static_cast<uint32_t>(base::roundUpToSystemPage(rawSize)); |
|
Primiano Tucci (use gerrit)
2016/11/22 14:28:32
drop base::
palmer
2016/11/24 01:05:56
Done.
| |
| 1158 discardableBytes = bucket->slotSize - usedBytes; | 1135 discardableBytes = bucket->slotSize - usedBytes; |
| 1159 if (discardableBytes && discard) { | 1136 if (discardableBytes && discard) { |
| 1160 char* ptr = reinterpret_cast<char*>(partitionPageToPointer(page)); | 1137 char* ptr = reinterpret_cast<char*>(partitionPageToPointer(page)); |
| 1161 ptr += usedBytes; | 1138 ptr += usedBytes; |
| 1162 discardSystemPages(ptr, discardableBytes); | 1139 discardSystemPages(ptr, discardableBytes); |
| 1163 } | 1140 } |
| 1164 return discardableBytes; | 1141 return discardableBytes; |
| 1165 } | 1142 } |
| 1166 | 1143 |
| 1167 const size_t maxSlotCount = | 1144 const size_t maxSlotCount = |
| 1168 (kPartitionPageSize * kMaxPartitionPagesPerSlotSpan) / kSystemPageSize; | 1145 (kPartitionPageSize * kMaxPartitionPagesPerSlotSpan) / kSystemPageSize; |
| 1169 ASSERT(bucketNumSlots <= maxSlotCount); | 1146 DCHECK(bucketNumSlots <= maxSlotCount); |
| 1170 ASSERT(page->numUnprovisionedSlots < bucketNumSlots); | 1147 DCHECK(page->numUnprovisionedSlots < bucketNumSlots); |
| 1171 size_t numSlots = bucketNumSlots - page->numUnprovisionedSlots; | 1148 size_t numSlots = bucketNumSlots - page->numUnprovisionedSlots; |
| 1172 char slotUsage[maxSlotCount]; | 1149 char slotUsage[maxSlotCount]; |
| 1173 size_t lastSlot = static_cast<size_t>(-1); | 1150 size_t lastSlot = static_cast<size_t>(-1); |
| 1174 memset(slotUsage, 1, numSlots); | 1151 memset(slotUsage, 1, numSlots); |
| 1175 char* ptr = reinterpret_cast<char*>(partitionPageToPointer(page)); | 1152 char* ptr = reinterpret_cast<char*>(partitionPageToPointer(page)); |
| 1176 PartitionFreelistEntry* entry = page->freelistHead; | 1153 PartitionFreelistEntry* entry = page->freelistHead; |
| 1177 // First, walk the freelist for this page and make a bitmap of which slots | 1154 // First, walk the freelist for this page and make a bitmap of which slots |
| 1178 // are not in use. | 1155 // are not in use. |
| 1179 while (entry) { | 1156 while (entry) { |
| 1180 size_t slotIndex = (reinterpret_cast<char*>(entry) - ptr) / slotSize; | 1157 size_t slotIndex = (reinterpret_cast<char*>(entry) - ptr) / slotSize; |
| 1181 ASSERT(slotIndex < numSlots); | 1158 DCHECK(slotIndex < numSlots); |
| 1182 slotUsage[slotIndex] = 0; | 1159 slotUsage[slotIndex] = 0; |
| 1183 entry = partitionFreelistMask(entry->next); | 1160 entry = partitionFreelistMask(entry->next); |
| 1184 // If we have a slot where the masked freelist entry is 0, we can | 1161 // If we have a slot where the masked freelist entry is 0, we can |
| 1185 // actually discard that freelist entry because touching a discarded | 1162 // actually discard that freelist entry because touching a discarded |
| 1186 // page is guaranteed to return original content or 0. | 1163 // page is guaranteed to return original content or 0. |
| 1187 // (Note that this optimization won't fire on big endian machines | 1164 // (Note that this optimization won't fire on big endian machines |
| 1188 // because the masking function is negation.) | 1165 // because the masking function is negation.) |
| 1189 if (!partitionFreelistMask(entry)) | 1166 if (!partitionFreelistMask(entry)) |
| 1190 lastSlot = slotIndex; | 1167 lastSlot = slotIndex; |
| 1191 } | 1168 } |
| 1192 | 1169 |
| 1193 // If the slot(s) at the end of the slot span are not in use, we can | 1170 // If the slot(s) at the end of the slot span are not in use, we can |
| 1194 // truncate them entirely and rewrite the freelist. | 1171 // truncate them entirely and rewrite the freelist. |
| 1195 size_t truncatedSlots = 0; | 1172 size_t truncatedSlots = 0; |
| 1196 while (!slotUsage[numSlots - 1]) { | 1173 while (!slotUsage[numSlots - 1]) { |
| 1197 truncatedSlots++; | 1174 truncatedSlots++; |
| 1198 numSlots--; | 1175 numSlots--; |
| 1199 ASSERT(numSlots); | 1176 DCHECK(numSlots); |
| 1200 } | 1177 } |
| 1201 // First, do the work of calculating the discardable bytes. Don't actually | 1178 // First, do the work of calculating the discardable bytes. Don't actually |
| 1202 // discard anything unless the discard flag was passed in. | 1179 // discard anything unless the discard flag was passed in. |
| 1203 char* beginPtr = nullptr; | 1180 char* beginPtr = nullptr; |
| 1204 char* endPtr = nullptr; | 1181 char* endPtr = nullptr; |
| 1205 size_t unprovisionedBytes = 0; | 1182 size_t unprovisionedBytes = 0; |
| 1206 if (truncatedSlots) { | 1183 if (truncatedSlots) { |
| 1207 beginPtr = ptr + (numSlots * slotSize); | 1184 beginPtr = ptr + (numSlots * slotSize); |
| 1208 endPtr = beginPtr + (slotSize * truncatedSlots); | 1185 endPtr = beginPtr + (slotSize * truncatedSlots); |
| 1209 beginPtr = reinterpret_cast<char*>( | 1186 beginPtr = reinterpret_cast<char*>( |
| 1210 WTF::roundUpToSystemPage(reinterpret_cast<size_t>(beginPtr))); | 1187 base::roundUpToSystemPage(reinterpret_cast<size_t>(beginPtr))); |
|
Primiano Tucci (use gerrit)
2016/11/22 14:28:32
ditto s/base:://
palmer
2016/11/24 01:05:56
Done.
| |
| 1211 // We round the end pointer here up and not down because we're at the | 1188 // We round the end pointer here up and not down because we're at the |
| 1212 // end of a slot span, so we "own" all the way up the page boundary. | 1189 // end of a slot span, so we "own" all the way up the page boundary. |
| 1213 endPtr = reinterpret_cast<char*>( | 1190 endPtr = reinterpret_cast<char*>( |
| 1214 WTF::roundUpToSystemPage(reinterpret_cast<size_t>(endPtr))); | 1191 base::roundUpToSystemPage(reinterpret_cast<size_t>(endPtr))); |
|
Primiano Tucci (use gerrit)
2016/11/22 14:28:33
and here
palmer
2016/11/24 01:05:56
Done.
| |
| 1215 ASSERT(endPtr <= ptr + partitionBucketBytes(bucket)); | 1192 DCHECK(endPtr <= ptr + partitionBucketBytes(bucket)); |
| 1216 if (beginPtr < endPtr) { | 1193 if (beginPtr < endPtr) { |
| 1217 unprovisionedBytes = endPtr - beginPtr; | 1194 unprovisionedBytes = endPtr - beginPtr; |
| 1218 discardableBytes += unprovisionedBytes; | 1195 discardableBytes += unprovisionedBytes; |
| 1219 } | 1196 } |
| 1220 } | 1197 } |
| 1221 if (unprovisionedBytes && discard) { | 1198 if (unprovisionedBytes && discard) { |
| 1222 ASSERT(truncatedSlots > 0); | 1199 DCHECK(truncatedSlots > 0); |
| 1223 size_t numNewEntries = 0; | 1200 size_t numNewEntries = 0; |
| 1224 page->numUnprovisionedSlots += static_cast<uint16_t>(truncatedSlots); | 1201 page->numUnprovisionedSlots += static_cast<uint16_t>(truncatedSlots); |
| 1225 // Rewrite the freelist. | 1202 // Rewrite the freelist. |
| 1226 PartitionFreelistEntry** entryPtr = &page->freelistHead; | 1203 PartitionFreelistEntry** entryPtr = &page->freelistHead; |
| 1227 for (size_t slotIndex = 0; slotIndex < numSlots; ++slotIndex) { | 1204 for (size_t slotIndex = 0; slotIndex < numSlots; ++slotIndex) { |
| 1228 if (slotUsage[slotIndex]) | 1205 if (slotUsage[slotIndex]) |
| 1229 continue; | 1206 continue; |
| 1230 PartitionFreelistEntry* entry = reinterpret_cast<PartitionFreelistEntry*>( | 1207 PartitionFreelistEntry* entry = reinterpret_cast<PartitionFreelistEntry*>( |
| 1231 ptr + (slotSize * slotIndex)); | 1208 ptr + (slotSize * slotIndex)); |
| 1232 *entryPtr = partitionFreelistMask(entry); | 1209 *entryPtr = partitionFreelistMask(entry); |
| 1233 entryPtr = reinterpret_cast<PartitionFreelistEntry**>(entry); | 1210 entryPtr = reinterpret_cast<PartitionFreelistEntry**>(entry); |
| 1234 numNewEntries++; | 1211 numNewEntries++; |
| 1235 } | 1212 } |
| 1236 // Terminate the freelist chain. | 1213 // Terminate the freelist chain. |
| 1237 *entryPtr = nullptr; | 1214 *entryPtr = nullptr; |
| 1238 // The freelist head is stored unmasked. | 1215 // The freelist head is stored unmasked. |
| 1239 page->freelistHead = partitionFreelistMask(page->freelistHead); | 1216 page->freelistHead = partitionFreelistMask(page->freelistHead); |
| 1240 ASSERT(numNewEntries == numSlots - page->numAllocatedSlots); | 1217 DCHECK(numNewEntries == numSlots - page->numAllocatedSlots); |
| 1241 // Discard the memory. | 1218 // Discard the memory. |
| 1242 discardSystemPages(beginPtr, unprovisionedBytes); | 1219 discardSystemPages(beginPtr, unprovisionedBytes); |
| 1243 } | 1220 } |
| 1244 | 1221 |
| 1245 // Next, walk the slots and for any not in use, consider where the system | 1222 // Next, walk the slots and for any not in use, consider where the system |
| 1246 // page boundaries occur. We can release any system pages back to the | 1223 // page boundaries occur. We can release any system pages back to the |
| 1247 // system as long as we don't interfere with a freelist pointer or an | 1224 // system as long as we don't interfere with a freelist pointer or an |
| 1248 // adjacent slot. | 1225 // adjacent slot. |
| 1249 for (size_t i = 0; i < numSlots; ++i) { | 1226 for (size_t i = 0; i < numSlots; ++i) { |
| 1250 if (slotUsage[i]) | 1227 if (slotUsage[i]) |
| 1251 continue; | 1228 continue; |
| 1252 // The first address we can safely discard is just after the freelist | 1229 // The first address we can safely discard is just after the freelist |
| 1253 // pointer. There's one quirk: if the freelist pointer is actually a | 1230 // pointer. There's one quirk: if the freelist pointer is actually a |
| 1254 // null, we can discard that pointer value too. | 1231 // null, we can discard that pointer value too. |
| 1255 char* beginPtr = ptr + (i * slotSize); | 1232 char* beginPtr = ptr + (i * slotSize); |
| 1256 char* endPtr = beginPtr + slotSize; | 1233 char* endPtr = beginPtr + slotSize; |
| 1257 if (i != lastSlot) | 1234 if (i != lastSlot) |
| 1258 beginPtr += sizeof(PartitionFreelistEntry); | 1235 beginPtr += sizeof(PartitionFreelistEntry); |
| 1259 beginPtr = reinterpret_cast<char*>( | 1236 beginPtr = reinterpret_cast<char*>( |
| 1260 WTF::roundUpToSystemPage(reinterpret_cast<size_t>(beginPtr))); | 1237 base::roundUpToSystemPage(reinterpret_cast<size_t>(beginPtr))); |
|
Primiano Tucci (use gerrit)
2016/11/22 14:28:32
base:: here and below
palmer
2016/11/24 01:05:56
Done.
| |
| 1261 endPtr = reinterpret_cast<char*>( | 1238 endPtr = reinterpret_cast<char*>( |
| 1262 WTF::roundDownToSystemPage(reinterpret_cast<size_t>(endPtr))); | 1239 base::roundDownToSystemPage(reinterpret_cast<size_t>(endPtr))); |
| 1263 if (beginPtr < endPtr) { | 1240 if (beginPtr < endPtr) { |
| 1264 size_t partialSlotBytes = endPtr - beginPtr; | 1241 size_t partialSlotBytes = endPtr - beginPtr; |
| 1265 discardableBytes += partialSlotBytes; | 1242 discardableBytes += partialSlotBytes; |
| 1266 if (discard) | 1243 if (discard) |
| 1267 discardSystemPages(beginPtr, partialSlotBytes); | 1244 discardSystemPages(beginPtr, partialSlotBytes); |
| 1268 } | 1245 } |
| 1269 } | 1246 } |
| 1270 return discardableBytes; | 1247 return discardableBytes; |
| 1271 } | 1248 } |
| 1272 | 1249 |
| 1273 static void partitionPurgeBucket(PartitionBucket* bucket) { | 1250 static void partitionPurgeBucket(PartitionBucket* bucket) { |
| 1274 if (bucket->activePagesHead != &PartitionRootGeneric::gSeedPage) { | 1251 if (bucket->activePagesHead != &PartitionRootGeneric::gSeedPage) { |
| 1275 for (PartitionPage* page = bucket->activePagesHead; page; | 1252 for (PartitionPage* page = bucket->activePagesHead; page; |
| 1276 page = page->nextPage) { | 1253 page = page->nextPage) { |
| 1277 ASSERT(page != &PartitionRootGeneric::gSeedPage); | 1254 DCHECK(page != &PartitionRootGeneric::gSeedPage); |
| 1278 (void)partitionPurgePage(page, true); | 1255 (void)partitionPurgePage(page, true); |
| 1279 } | 1256 } |
| 1280 } | 1257 } |
| 1281 } | 1258 } |
| 1282 | 1259 |
| 1283 void partitionPurgeMemory(PartitionRoot* root, int flags) { | 1260 void partitionPurgeMemory(PartitionRoot* root, int flags) { |
| 1284 if (flags & PartitionPurgeDecommitEmptyPages) | 1261 if (flags & PartitionPurgeDecommitEmptyPages) |
| 1285 partitionDecommitEmptyPages(root); | 1262 partitionDecommitEmptyPages(root); |
| 1286 // We don't currently do anything for PartitionPurgeDiscardUnusedSystemPages | 1263 // We don't currently do anything for PartitionPurgeDiscardUnusedSystemPages |
| 1287 // here because that flag is only useful for allocations >= system page | 1264 // here because that flag is only useful for allocations >= system page |
| 1288 // size. We only have allocations that large inside generic partitions | 1265 // size. We only have allocations that large inside generic partitions |
| 1289 // at the moment. | 1266 // at the moment. |
| 1290 } | 1267 } |
| 1291 | 1268 |
| 1292 void partitionPurgeMemoryGeneric(PartitionRootGeneric* root, int flags) { | 1269 void partitionPurgeMemoryGeneric(PartitionRootGeneric* root, int flags) { |
| 1293 SpinLock::Guard guard(root->lock); | 1270 subtle::SpinLock::Guard guard(root->lock); |
| 1294 if (flags & PartitionPurgeDecommitEmptyPages) | 1271 if (flags & PartitionPurgeDecommitEmptyPages) |
| 1295 partitionDecommitEmptyPages(root); | 1272 partitionDecommitEmptyPages(root); |
| 1296 if (flags & PartitionPurgeDiscardUnusedSystemPages) { | 1273 if (flags & PartitionPurgeDiscardUnusedSystemPages) { |
| 1297 for (size_t i = 0; i < kGenericNumBuckets; ++i) { | 1274 for (size_t i = 0; i < kGenericNumBuckets; ++i) { |
| 1298 PartitionBucket* bucket = &root->buckets[i]; | 1275 PartitionBucket* bucket = &root->buckets[i]; |
| 1299 if (bucket->slotSize >= kSystemPageSize) | 1276 if (bucket->slotSize >= kSystemPageSize) |
| 1300 partitionPurgeBucket(bucket); | 1277 partitionPurgeBucket(bucket); |
| 1301 } | 1278 } |
| 1302 } | 1279 } |
| 1303 } | 1280 } |
| (...skipping 11 matching lines...) Expand all Loading... | |
| 1315 partitionPurgePage(const_cast<PartitionPage*>(page), false); | 1292 partitionPurgePage(const_cast<PartitionPage*>(page), false); |
| 1316 | 1293 |
| 1317 size_t rawSize = partitionPageGetRawSize(const_cast<PartitionPage*>(page)); | 1294 size_t rawSize = partitionPageGetRawSize(const_cast<PartitionPage*>(page)); |
| 1318 if (rawSize) | 1295 if (rawSize) |
| 1319 statsOut->activeBytes += static_cast<uint32_t>(rawSize); | 1296 statsOut->activeBytes += static_cast<uint32_t>(rawSize); |
| 1320 else | 1297 else |
| 1321 statsOut->activeBytes += | 1298 statsOut->activeBytes += |
| 1322 (page->numAllocatedSlots * statsOut->bucketSlotSize); | 1299 (page->numAllocatedSlots * statsOut->bucketSlotSize); |
| 1323 | 1300 |
| 1324 size_t pageBytesResident = | 1301 size_t pageBytesResident = |
| 1325 WTF::roundUpToSystemPage((bucketNumSlots - page->numUnprovisionedSlots) * | 1302 base::roundUpToSystemPage((bucketNumSlots - page->numUnprovisionedSlots) * |
| 1326 statsOut->bucketSlotSize); | 1303 statsOut->bucketSlotSize); |
| 1327 statsOut->residentBytes += pageBytesResident; | 1304 statsOut->residentBytes += pageBytesResident; |
| 1328 if (partitionPageStateIsEmpty(page)) { | 1305 if (partitionPageStateIsEmpty(page)) { |
| 1329 statsOut->decommittableBytes += pageBytesResident; | 1306 statsOut->decommittableBytes += pageBytesResident; |
| 1330 ++statsOut->numEmptyPages; | 1307 ++statsOut->numEmptyPages; |
| 1331 } else if (partitionPageStateIsFull(page)) { | 1308 } else if (partitionPageStateIsFull(page)) { |
| 1332 ++statsOut->numFullPages; | 1309 ++statsOut->numFullPages; |
| 1333 } else { | 1310 } else { |
| 1334 ASSERT(partitionPageStateIsActive(page)); | 1311 DCHECK(partitionPageStateIsActive(page)); |
| 1335 ++statsOut->numActivePages; | 1312 ++statsOut->numActivePages; |
| 1336 } | 1313 } |
| 1337 } | 1314 } |
| 1338 | 1315 |
| 1339 static void partitionDumpBucketStats(PartitionBucketMemoryStats* statsOut, | 1316 static void partitionDumpBucketStats(PartitionBucketMemoryStats* statsOut, |
| 1340 const PartitionBucket* bucket) { | 1317 const PartitionBucket* bucket) { |
| 1341 ASSERT(!partitionBucketIsDirectMapped(bucket)); | 1318 DCHECK(!partitionBucketIsDirectMapped(bucket)); |
| 1342 statsOut->isValid = false; | 1319 statsOut->isValid = false; |
| 1343 // If the active page list is empty (== &PartitionRootGeneric::gSeedPage), | 1320 // If the active page list is empty (== &PartitionRootGeneric::gSeedPage), |
| 1344 // the bucket might still need to be reported if it has a list of empty, | 1321 // the bucket might still need to be reported if it has a list of empty, |
| 1345 // decommitted or full pages. | 1322 // decommitted or full pages. |
| 1346 if (bucket->activePagesHead == &PartitionRootGeneric::gSeedPage && | 1323 if (bucket->activePagesHead == &PartitionRootGeneric::gSeedPage && |
| 1347 !bucket->emptyPagesHead && !bucket->decommittedPagesHead && | 1324 !bucket->emptyPagesHead && !bucket->decommittedPagesHead && |
| 1348 !bucket->numFullPages) | 1325 !bucket->numFullPages) |
| 1349 return; | 1326 return; |
| 1350 | 1327 |
| 1351 memset(statsOut, '\0', sizeof(*statsOut)); | 1328 memset(statsOut, '\0', sizeof(*statsOut)); |
| 1352 statsOut->isValid = true; | 1329 statsOut->isValid = true; |
| 1353 statsOut->isDirectMap = false; | 1330 statsOut->isDirectMap = false; |
| 1354 statsOut->numFullPages = static_cast<size_t>(bucket->numFullPages); | 1331 statsOut->numFullPages = static_cast<size_t>(bucket->numFullPages); |
| 1355 statsOut->bucketSlotSize = bucket->slotSize; | 1332 statsOut->bucketSlotSize = bucket->slotSize; |
| 1356 uint16_t bucketNumSlots = partitionBucketSlots(bucket); | 1333 uint16_t bucketNumSlots = partitionBucketSlots(bucket); |
| 1357 size_t bucketUsefulStorage = statsOut->bucketSlotSize * bucketNumSlots; | 1334 size_t bucketUsefulStorage = statsOut->bucketSlotSize * bucketNumSlots; |
| 1358 statsOut->allocatedPageSize = partitionBucketBytes(bucket); | 1335 statsOut->allocatedPageSize = partitionBucketBytes(bucket); |
| 1359 statsOut->activeBytes = bucket->numFullPages * bucketUsefulStorage; | 1336 statsOut->activeBytes = bucket->numFullPages * bucketUsefulStorage; |
| 1360 statsOut->residentBytes = bucket->numFullPages * statsOut->allocatedPageSize; | 1337 statsOut->residentBytes = bucket->numFullPages * statsOut->allocatedPageSize; |
| 1361 | 1338 |
| 1362 for (const PartitionPage* page = bucket->emptyPagesHead; page; | 1339 for (const PartitionPage* page = bucket->emptyPagesHead; page; |
| 1363 page = page->nextPage) { | 1340 page = page->nextPage) { |
| 1364 ASSERT(partitionPageStateIsEmpty(page) || | 1341 DCHECK(partitionPageStateIsEmpty(page) || |
| 1365 partitionPageStateIsDecommitted(page)); | 1342 partitionPageStateIsDecommitted(page)); |
| 1366 partitionDumpPageStats(statsOut, page); | 1343 partitionDumpPageStats(statsOut, page); |
| 1367 } | 1344 } |
| 1368 for (const PartitionPage* page = bucket->decommittedPagesHead; page; | 1345 for (const PartitionPage* page = bucket->decommittedPagesHead; page; |
| 1369 page = page->nextPage) { | 1346 page = page->nextPage) { |
| 1370 ASSERT(partitionPageStateIsDecommitted(page)); | 1347 DCHECK(partitionPageStateIsDecommitted(page)); |
| 1371 partitionDumpPageStats(statsOut, page); | 1348 partitionDumpPageStats(statsOut, page); |
| 1372 } | 1349 } |
| 1373 | 1350 |
| 1374 if (bucket->activePagesHead != &PartitionRootGeneric::gSeedPage) { | 1351 if (bucket->activePagesHead != &PartitionRootGeneric::gSeedPage) { |
| 1375 for (const PartitionPage* page = bucket->activePagesHead; page; | 1352 for (const PartitionPage* page = bucket->activePagesHead; page; |
| 1376 page = page->nextPage) { | 1353 page = page->nextPage) { |
| 1377 ASSERT(page != &PartitionRootGeneric::gSeedPage); | 1354 DCHECK(page != &PartitionRootGeneric::gSeedPage); |
| 1378 partitionDumpPageStats(statsOut, page); | 1355 partitionDumpPageStats(statsOut, page); |
| 1379 } | 1356 } |
| 1380 } | 1357 } |
| 1381 } | 1358 } |
| 1382 | 1359 |
| 1383 void partitionDumpStatsGeneric(PartitionRootGeneric* partition, | 1360 void partitionDumpStatsGeneric(PartitionRootGeneric* partition, |
| 1384 const char* partitionName, | 1361 const char* partitionName, |
| 1385 bool isLightDump, | 1362 bool isLightDump, |
| 1386 PartitionStatsDumper* partitionStatsDumper) { | 1363 PartitionStatsDumper* partitionStatsDumper) { |
| 1387 PartitionBucketMemoryStats bucketStats[kGenericNumBuckets]; | 1364 PartitionBucketMemoryStats bucketStats[kGenericNumBuckets]; |
| 1388 static const size_t kMaxReportableDirectMaps = 4096; | 1365 static const size_t kMaxReportableDirectMaps = 4096; |
| 1389 uint32_t directMapLengths[kMaxReportableDirectMaps]; | 1366 uint32_t directMapLengths[kMaxReportableDirectMaps]; |
| 1390 size_t numDirectMappedAllocations = 0; | 1367 size_t numDirectMappedAllocations = 0; |
| 1391 | 1368 |
| 1392 { | 1369 { |
| 1393 SpinLock::Guard guard(partition->lock); | 1370 subtle::SpinLock::Guard guard(partition->lock); |
| 1394 | 1371 |
| 1395 for (size_t i = 0; i < kGenericNumBuckets; ++i) { | 1372 for (size_t i = 0; i < kGenericNumBuckets; ++i) { |
| 1396 const PartitionBucket* bucket = &partition->buckets[i]; | 1373 const PartitionBucket* bucket = &partition->buckets[i]; |
| 1397 // Don't report the pseudo buckets that the generic allocator sets up in | 1374 // Don't report the pseudo buckets that the generic allocator sets up in |
| 1398 // order to preserve a fast size->bucket map (see | 1375 // order to preserve a fast size->bucket map (see |
| 1399 // partitionAllocGenericInit for details). | 1376 // partitionAllocGenericInit for details). |
| 1400 if (!bucket->activePagesHead) | 1377 if (!bucket->activePagesHead) |
| 1401 bucketStats[i].isValid = false; | 1378 bucketStats[i].isValid = false; |
| 1402 else | 1379 else |
| 1403 partitionDumpBucketStats(&bucketStats[i], bucket); | 1380 partitionDumpBucketStats(&bucketStats[i], bucket); |
| 1404 } | 1381 } |
| 1405 | 1382 |
| 1406 for (PartitionDirectMapExtent* extent = partition->directMapList; extent; | 1383 for (PartitionDirectMapExtent* extent = partition->directMapList; extent; |
| 1407 extent = extent->nextExtent) { | 1384 extent = extent->nextExtent) { |
| 1408 ASSERT(!extent->nextExtent || extent->nextExtent->prevExtent == extent); | 1385 DCHECK(!extent->nextExtent || extent->nextExtent->prevExtent == extent); |
| 1409 directMapLengths[numDirectMappedAllocations] = extent->bucket->slotSize; | 1386 directMapLengths[numDirectMappedAllocations] = extent->bucket->slotSize; |
| 1410 ++numDirectMappedAllocations; | 1387 ++numDirectMappedAllocations; |
| 1411 if (numDirectMappedAllocations == kMaxReportableDirectMaps) | 1388 if (numDirectMappedAllocations == kMaxReportableDirectMaps) |
| 1412 break; | 1389 break; |
| 1413 } | 1390 } |
| 1414 } | 1391 } |
| 1415 | 1392 |
| 1416 // partitionsDumpBucketStats is called after collecting stats because it | 1393 // partitionsDumpBucketStats is called after collecting stats because it |
| 1417 // can try to allocate using PartitionAllocGeneric and it can't obtain the | 1394 // can try to allocate using PartitionAllocGeneric and it can't obtain the |
| 1418 // lock. | 1395 // lock. |
| (...skipping 37 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 1456 partitionStatsDumper->partitionDumpTotals(partitionName, &partitionStats); | 1433 partitionStatsDumper->partitionDumpTotals(partitionName, &partitionStats); |
| 1457 } | 1434 } |
| 1458 | 1435 |
| 1459 void partitionDumpStats(PartitionRoot* partition, | 1436 void partitionDumpStats(PartitionRoot* partition, |
| 1460 const char* partitionName, | 1437 const char* partitionName, |
| 1461 bool isLightDump, | 1438 bool isLightDump, |
| 1462 PartitionStatsDumper* partitionStatsDumper) { | 1439 PartitionStatsDumper* partitionStatsDumper) { |
| 1463 static const size_t kMaxReportableBuckets = 4096 / sizeof(void*); | 1440 static const size_t kMaxReportableBuckets = 4096 / sizeof(void*); |
| 1464 PartitionBucketMemoryStats memoryStats[kMaxReportableBuckets]; | 1441 PartitionBucketMemoryStats memoryStats[kMaxReportableBuckets]; |
| 1465 const size_t partitionNumBuckets = partition->numBuckets; | 1442 const size_t partitionNumBuckets = partition->numBuckets; |
| 1466 ASSERT(partitionNumBuckets <= kMaxReportableBuckets); | 1443 DCHECK(partitionNumBuckets <= kMaxReportableBuckets); |
| 1467 | 1444 |
| 1468 for (size_t i = 0; i < partitionNumBuckets; ++i) | 1445 for (size_t i = 0; i < partitionNumBuckets; ++i) |
| 1469 partitionDumpBucketStats(&memoryStats[i], &partition->buckets()[i]); | 1446 partitionDumpBucketStats(&memoryStats[i], &partition->buckets()[i]); |
| 1470 | 1447 |
| 1471 // partitionsDumpBucketStats is called after collecting stats because it | 1448 // partitionsDumpBucketStats is called after collecting stats because it |
| 1472 // can use PartitionAlloc to allocate and this can affect the statistics. | 1449 // can use PartitionAlloc to allocate and this can affect the statistics. |
| 1473 PartitionMemoryStats partitionStats = {0}; | 1450 PartitionMemoryStats partitionStats = {0}; |
| 1474 partitionStats.totalMmappedBytes = partition->totalSizeOfSuperPages; | 1451 partitionStats.totalMmappedBytes = partition->totalSizeOfSuperPages; |
| 1475 partitionStats.totalCommittedBytes = partition->totalSizeOfCommittedPages; | 1452 partitionStats.totalCommittedBytes = partition->totalSizeOfCommittedPages; |
| 1476 ASSERT(!partition->totalSizeOfDirectMappedPages); | 1453 DCHECK(!partition->totalSizeOfDirectMappedPages); |
| 1477 for (size_t i = 0; i < partitionNumBuckets; ++i) { | 1454 for (size_t i = 0; i < partitionNumBuckets; ++i) { |
| 1478 if (memoryStats[i].isValid) { | 1455 if (memoryStats[i].isValid) { |
| 1479 partitionStats.totalResidentBytes += memoryStats[i].residentBytes; | 1456 partitionStats.totalResidentBytes += memoryStats[i].residentBytes; |
| 1480 partitionStats.totalActiveBytes += memoryStats[i].activeBytes; | 1457 partitionStats.totalActiveBytes += memoryStats[i].activeBytes; |
| 1481 partitionStats.totalDecommittableBytes += | 1458 partitionStats.totalDecommittableBytes += |
| 1482 memoryStats[i].decommittableBytes; | 1459 memoryStats[i].decommittableBytes; |
| 1483 partitionStats.totalDiscardableBytes += memoryStats[i].discardableBytes; | 1460 partitionStats.totalDiscardableBytes += memoryStats[i].discardableBytes; |
| 1484 if (!isLightDump) | 1461 if (!isLightDump) |
| 1485 partitionStatsDumper->partitionsDumpBucketStats(partitionName, | 1462 partitionStatsDumper->partitionsDumpBucketStats(partitionName, |
| 1486 &memoryStats[i]); | 1463 &memoryStats[i]); |
| 1487 } | 1464 } |
| 1488 } | 1465 } |
| 1489 partitionStatsDumper->partitionDumpTotals(partitionName, &partitionStats); | 1466 partitionStatsDumper->partitionDumpTotals(partitionName, &partitionStats); |
| 1490 } | 1467 } |
| 1491 | 1468 |
| 1492 } // namespace WTF | 1469 } // namespace base |
| OLD | NEW |