OLD | NEW |
1 /* | 1 // Copyright (c) 2013 The Chromium Authors. All rights reserved. |
2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 // Use of this source code is governed by a BSD-style license that can be |
3 * | 3 // found in the LICENSE file. |
4 * Redistribution and use in source and binary forms, with or without | |
5 * modification, are permitted provided that the following conditions are | |
6 * met: | |
7 * | |
8 * * Redistributions of source code must retain the above copyright | |
9 * notice, this list of conditions and the following disclaimer. | |
10 * * Redistributions in binary form must reproduce the above | |
11 * copyright notice, this list of conditions and the following disclaimer | |
12 * in the documentation and/or other materials provided with the | |
13 * distribution. | |
14 * * Neither the name of Google Inc. nor the names of its | |
15 * contributors may be used to endorse or promote products derived from | |
16 * this software without specific prior written permission. | |
17 * | |
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | |
21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | |
22 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |
23 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | |
24 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |
28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
29 */ | |
30 | 4 |
31 #include "wtf/allocator/PartitionAlloc.h" | 5 #include "base/allocator/partition_allocator/partition_alloc.h" |
32 | 6 |
33 #include <string.h> | 7 #include <string.h> |
34 | 8 |
35 #ifndef NDEBUG | 9 #include "base/compiler_specific.h" |
36 #include <stdio.h> | 10 #include "base/synchronization/spin_lock.h" |
37 #endif | |
38 | 11 |
39 // Two partition pages are used as guard / metadata page so make sure the super | 12 // Two partition pages are used as guard / metadata page so make sure the super |
40 // page size is bigger. | 13 // page size is bigger. |
41 static_assert(WTF::kPartitionPageSize * 4 <= WTF::kSuperPageSize, | 14 static_assert(base::kPartitionPageSize * 4 <= base::kSuperPageSize, |
42 "ok super page size"); | 15 "ok super page size"); |
43 static_assert(!(WTF::kSuperPageSize % WTF::kPartitionPageSize), | 16 static_assert(!(base::kSuperPageSize % base::kPartitionPageSize), |
44 "ok super page multiple"); | 17 "ok super page multiple"); |
45 // Four system pages gives us room to hack out a still-guard-paged piece | 18 // Four system pages gives us room to hack out a still-guard-paged piece |
46 // of metadata in the middle of a guard partition page. | 19 // of metadata in the middle of a guard partition page. |
47 static_assert(WTF::kSystemPageSize * 4 <= WTF::kPartitionPageSize, | 20 static_assert(base::kSystemPageSize * 4 <= base::kPartitionPageSize, |
48 "ok partition page size"); | 21 "ok partition page size"); |
49 static_assert(!(WTF::kPartitionPageSize % WTF::kSystemPageSize), | 22 static_assert(!(base::kPartitionPageSize % base::kSystemPageSize), |
50 "ok partition page multiple"); | 23 "ok partition page multiple"); |
51 static_assert(sizeof(WTF::PartitionPage) <= WTF::kPageMetadataSize, | 24 static_assert(sizeof(base::PartitionPage) <= base::kPageMetadataSize, |
52 "PartitionPage should not be too big"); | 25 "PartitionPage should not be too big"); |
53 static_assert(sizeof(WTF::PartitionBucket) <= WTF::kPageMetadataSize, | 26 static_assert(sizeof(base::PartitionBucket) <= base::kPageMetadataSize, |
54 "PartitionBucket should not be too big"); | 27 "PartitionBucket should not be too big"); |
55 static_assert(sizeof(WTF::PartitionSuperPageExtentEntry) <= | 28 static_assert(sizeof(base::PartitionSuperPageExtentEntry) <= |
56 WTF::kPageMetadataSize, | 29 base::kPageMetadataSize, |
57 "PartitionSuperPageExtentEntry should not be too big"); | 30 "PartitionSuperPageExtentEntry should not be too big"); |
58 static_assert(WTF::kPageMetadataSize * WTF::kNumPartitionPagesPerSuperPage <= | 31 static_assert(base::kPageMetadataSize * base::kNumPartitionPagesPerSuperPage <= |
59 WTF::kSystemPageSize, | 32 base::kSystemPageSize, |
60 "page metadata fits in hole"); | 33 "page metadata fits in hole"); |
61 // Check that some of our zanier calculations worked out as expected. | 34 // Check that some of our zanier calculations worked out as expected. |
62 static_assert(WTF::kGenericSmallestBucket == 8, "generic smallest bucket"); | 35 static_assert(base::kGenericSmallestBucket == 8, "generic smallest bucket"); |
63 static_assert(WTF::kGenericMaxBucketed == 983040, "generic max bucketed"); | 36 static_assert(base::kGenericMaxBucketed == 983040, "generic max bucketed"); |
64 static_assert(WTF::kMaxSystemPagesPerSlotSpan < (1 << 8), | 37 static_assert(base::kMaxSystemPagesPerSlotSpan < (1 << 8), |
65 "System pages per slot span must be less than 128."); | 38 "System pages per slot span must be less than 128."); |
66 | 39 |
67 namespace WTF { | 40 namespace base { |
68 | 41 |
69 SpinLock PartitionRootBase::gInitializedLock; | 42 subtle::SpinLock PartitionRootBase::gInitializedLock; |
70 bool PartitionRootBase::gInitialized = false; | 43 bool PartitionRootBase::gInitialized = false; |
71 PartitionPage PartitionRootBase::gSeedPage; | 44 PartitionPage PartitionRootBase::gSeedPage; |
72 PartitionBucket PartitionRootBase::gPagedBucket; | 45 PartitionBucket PartitionRootBase::gPagedBucket; |
73 void (*PartitionRootBase::gOomHandlingFunction)() = nullptr; | 46 void (*PartitionRootBase::gOomHandlingFunction)() = nullptr; |
74 PartitionAllocHooks::AllocationHook* PartitionAllocHooks::m_allocationHook = | 47 PartitionAllocHooks::AllocationHook* PartitionAllocHooks::m_allocationHook = |
75 nullptr; | 48 nullptr; |
76 PartitionAllocHooks::FreeHook* PartitionAllocHooks::m_freeHook = nullptr; | 49 PartitionAllocHooks::FreeHook* PartitionAllocHooks::m_freeHook = nullptr; |
77 | 50 |
78 static uint8_t partitionBucketNumSystemPages(size_t size) { | 51 static uint8_t partitionBucketNumSystemPages(size_t size) { |
79 // This works out reasonably for the current bucket sizes of the generic | 52 // This works out reasonably for the current bucket sizes of the generic |
80 // allocator, and the current values of partition page size and constants. | 53 // allocator, and the current values of partition page size and constants. |
81 // Specifically, we have enough room to always pack the slots perfectly into | 54 // Specifically, we have enough room to always pack the slots perfectly into |
82 // some number of system pages. The only waste is the waste associated with | 55 // some number of system pages. The only waste is the waste associated with |
83 // unfaulted pages (i.e. wasted address space). | 56 // unfaulted pages (i.e. wasted address space). |
84 // TODO: we end up using a lot of system pages for very small sizes. For | 57 // TODO: we end up using a lot of system pages for very small sizes. For |
85 // example, we'll use 12 system pages for slot size 24. The slot size is | 58 // example, we'll use 12 system pages for slot size 24. The slot size is |
86 // so small that the waste would be tiny with just 4, or 1, system pages. | 59 // so small that the waste would be tiny with just 4, or 1, system pages. |
87 // Later, we can investigate whether there are anti-fragmentation benefits | 60 // Later, we can investigate whether there are anti-fragmentation benefits |
88 // to using fewer system pages. | 61 // to using fewer system pages. |
89 double bestWasteRatio = 1.0f; | 62 double bestWasteRatio = 1.0f; |
90 uint16_t bestPages = 0; | 63 uint16_t bestPages = 0; |
91 if (size > kMaxSystemPagesPerSlotSpan * kSystemPageSize) { | 64 if (size > kMaxSystemPagesPerSlotSpan * kSystemPageSize) { |
92 ASSERT(!(size % kSystemPageSize)); | 65 DCHECK(!(size % kSystemPageSize)); |
93 bestPages = static_cast<uint16_t>(size / kSystemPageSize); | 66 bestPages = static_cast<uint16_t>(size / kSystemPageSize); |
94 RELEASE_ASSERT(bestPages < (1 << 8)); | 67 CHECK(bestPages < (1 << 8)); |
95 return static_cast<uint8_t>(bestPages); | 68 return static_cast<uint8_t>(bestPages); |
96 } | 69 } |
97 ASSERT(size <= kMaxSystemPagesPerSlotSpan * kSystemPageSize); | 70 DCHECK(size <= kMaxSystemPagesPerSlotSpan * kSystemPageSize); |
98 for (uint16_t i = kNumSystemPagesPerPartitionPage - 1; | 71 for (uint16_t i = kNumSystemPagesPerPartitionPage - 1; |
99 i <= kMaxSystemPagesPerSlotSpan; ++i) { | 72 i <= kMaxSystemPagesPerSlotSpan; ++i) { |
100 size_t pageSize = kSystemPageSize * i; | 73 size_t pageSize = kSystemPageSize * i; |
101 size_t numSlots = pageSize / size; | 74 size_t numSlots = pageSize / size; |
102 size_t waste = pageSize - (numSlots * size); | 75 size_t waste = pageSize - (numSlots * size); |
103 // Leaving a page unfaulted is not free; the page will occupy an empty page | 76 // Leaving a page unfaulted is not free; the page will occupy an empty page |
104 // table entry. Make a simple attempt to account for that. | 77 // table entry. Make a simple attempt to account for that. |
105 size_t numRemainderPages = i & (kNumSystemPagesPerPartitionPage - 1); | 78 size_t numRemainderPages = i & (kNumSystemPagesPerPartitionPage - 1); |
106 size_t numUnfaultedPages = | 79 size_t numUnfaultedPages = |
107 numRemainderPages | 80 numRemainderPages |
108 ? (kNumSystemPagesPerPartitionPage - numRemainderPages) | 81 ? (kNumSystemPagesPerPartitionPage - numRemainderPages) |
109 : 0; | 82 : 0; |
110 waste += sizeof(void*) * numUnfaultedPages; | 83 waste += sizeof(void*) * numUnfaultedPages; |
111 double wasteRatio = (double)waste / (double)pageSize; | 84 double wasteRatio = (double)waste / (double)pageSize; |
112 if (wasteRatio < bestWasteRatio) { | 85 if (wasteRatio < bestWasteRatio) { |
113 bestWasteRatio = wasteRatio; | 86 bestWasteRatio = wasteRatio; |
114 bestPages = i; | 87 bestPages = i; |
115 } | 88 } |
116 } | 89 } |
117 ASSERT(bestPages > 0); | 90 DCHECK(bestPages > 0); |
118 RELEASE_ASSERT(bestPages <= kMaxSystemPagesPerSlotSpan); | 91 CHECK(bestPages <= kMaxSystemPagesPerSlotSpan); |
119 return static_cast<uint8_t>(bestPages); | 92 return static_cast<uint8_t>(bestPages); |
120 } | 93 } |
121 | 94 |
122 static void partitionAllocBaseInit(PartitionRootBase* root) { | 95 static void partitionAllocBaseInit(PartitionRootBase* root) { |
123 ASSERT(!root->initialized); | 96 DCHECK(!root->initialized); |
124 { | 97 { |
125 SpinLock::Guard guard(PartitionRootBase::gInitializedLock); | 98 subtle::SpinLock::Guard guard(PartitionRootBase::gInitializedLock); |
126 if (!PartitionRootBase::gInitialized) { | 99 if (!PartitionRootBase::gInitialized) { |
127 PartitionRootBase::gInitialized = true; | 100 PartitionRootBase::gInitialized = true; |
128 // We mark the seed page as free to make sure it is skipped by our | 101 // We mark the seed page as free to make sure it is skipped by our |
129 // logic to find a new active page. | 102 // logic to find a new active page. |
130 PartitionRootBase::gPagedBucket.activePagesHead = | 103 PartitionRootBase::gPagedBucket.activePagesHead = |
131 &PartitionRootGeneric::gSeedPage; | 104 &PartitionRootGeneric::gSeedPage; |
132 } | 105 } |
133 } | 106 } |
134 | 107 |
135 root->initialized = true; | 108 root->initialized = true; |
(...skipping 18 matching lines...) Expand all Loading... |
154 PartitionRootBase* root) { | 127 PartitionRootBase* root) { |
155 bucket->activePagesHead = &PartitionRootGeneric::gSeedPage; | 128 bucket->activePagesHead = &PartitionRootGeneric::gSeedPage; |
156 bucket->emptyPagesHead = 0; | 129 bucket->emptyPagesHead = 0; |
157 bucket->decommittedPagesHead = 0; | 130 bucket->decommittedPagesHead = 0; |
158 bucket->numFullPages = 0; | 131 bucket->numFullPages = 0; |
159 bucket->numSystemPagesPerSlotSpan = | 132 bucket->numSystemPagesPerSlotSpan = |
160 partitionBucketNumSystemPages(bucket->slotSize); | 133 partitionBucketNumSystemPages(bucket->slotSize); |
161 } | 134 } |
162 | 135 |
163 void partitionAllocGlobalInit(void (*oomHandlingFunction)()) { | 136 void partitionAllocGlobalInit(void (*oomHandlingFunction)()) { |
164 ASSERT(oomHandlingFunction); | 137 DCHECK(oomHandlingFunction); |
165 PartitionRootBase::gOomHandlingFunction = oomHandlingFunction; | 138 PartitionRootBase::gOomHandlingFunction = oomHandlingFunction; |
166 } | 139 } |
167 | 140 |
168 void partitionAllocInit(PartitionRoot* root, | 141 void partitionAllocInit(PartitionRoot* root, |
169 size_t numBuckets, | 142 size_t numBuckets, |
170 size_t maxAllocation) { | 143 size_t maxAllocation) { |
171 partitionAllocBaseInit(root); | 144 partitionAllocBaseInit(root); |
172 | 145 |
173 root->numBuckets = numBuckets; | 146 root->numBuckets = numBuckets; |
174 root->maxAllocation = maxAllocation; | 147 root->maxAllocation = maxAllocation; |
175 size_t i; | 148 size_t i; |
176 for (i = 0; i < root->numBuckets; ++i) { | 149 for (i = 0; i < root->numBuckets; ++i) { |
177 PartitionBucket* bucket = &root->buckets()[i]; | 150 PartitionBucket* bucket = &root->buckets()[i]; |
178 if (!i) | 151 if (!i) |
179 bucket->slotSize = kAllocationGranularity; | 152 bucket->slotSize = kAllocationGranularity; |
180 else | 153 else |
181 bucket->slotSize = i << kBucketShift; | 154 bucket->slotSize = i << kBucketShift; |
182 partitionBucketInitBase(bucket, root); | 155 partitionBucketInitBase(bucket, root); |
183 } | 156 } |
184 } | 157 } |
185 | 158 |
186 void partitionAllocGenericInit(PartitionRootGeneric* root) { | 159 void partitionAllocGenericInit(PartitionRootGeneric* root) { |
187 SpinLock::Guard guard(root->lock); | 160 subtle::SpinLock::Guard guard(root->lock); |
188 | 161 |
189 partitionAllocBaseInit(root); | 162 partitionAllocBaseInit(root); |
190 | 163 |
191 // Precalculate some shift and mask constants used in the hot path. | 164 // Precalculate some shift and mask constants used in the hot path. |
192 // Example: malloc(41) == 101001 binary. | 165 // Example: malloc(41) == 101001 binary. |
193 // Order is 6 (1 << 6-1)==32 is highest bit set. | 166 // Order is 6 (1 << 6-1)==32 is highest bit set. |
194 // orderIndex is the next three MSB == 010 == 2. | 167 // orderIndex is the next three MSB == 010 == 2. |
195 // subOrderIndexMask is a mask for the remaining bits == 11 (masking to 01 for | 168 // subOrderIndexMask is a mask for the remaining bits == 11 (masking to 01 for |
196 // the subOrderIndex). | 169 // the subOrderIndex). |
197 size_t order; | 170 size_t order; |
(...skipping 32 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
230 bucket->slotSize = currentSize; | 203 bucket->slotSize = currentSize; |
231 partitionBucketInitBase(bucket, root); | 204 partitionBucketInitBase(bucket, root); |
232 // Disable psuedo buckets so that touching them faults. | 205 // Disable psuedo buckets so that touching them faults. |
233 if (currentSize % kGenericSmallestBucket) | 206 if (currentSize % kGenericSmallestBucket) |
234 bucket->activePagesHead = 0; | 207 bucket->activePagesHead = 0; |
235 currentSize += currentIncrement; | 208 currentSize += currentIncrement; |
236 ++bucket; | 209 ++bucket; |
237 } | 210 } |
238 currentIncrement <<= 1; | 211 currentIncrement <<= 1; |
239 } | 212 } |
240 ASSERT(currentSize == 1 << kGenericMaxBucketedOrder); | 213 DCHECK(currentSize == 1 << kGenericMaxBucketedOrder); |
241 ASSERT(bucket == &root->buckets[0] + kGenericNumBuckets); | 214 DCHECK(bucket == &root->buckets[0] + kGenericNumBuckets); |
242 | 215 |
243 // Then set up the fast size -> bucket lookup table. | 216 // Then set up the fast size -> bucket lookup table. |
244 bucket = &root->buckets[0]; | 217 bucket = &root->buckets[0]; |
245 PartitionBucket** bucketPtr = &root->bucketLookups[0]; | 218 PartitionBucket** bucketPtr = &root->bucketLookups[0]; |
246 for (order = 0; order <= kBitsPerSizet; ++order) { | 219 for (order = 0; order <= kBitsPerSizet; ++order) { |
247 for (j = 0; j < kGenericNumBucketsPerOrder; ++j) { | 220 for (j = 0; j < kGenericNumBucketsPerOrder; ++j) { |
248 if (order < kGenericMinBucketedOrder) { | 221 if (order < kGenericMinBucketedOrder) { |
249 // Use the bucket of the finest granularity for malloc(0) etc. | 222 // Use the bucket of the finest granularity for malloc(0) etc. |
250 *bucketPtr++ = &root->buckets[0]; | 223 *bucketPtr++ = &root->buckets[0]; |
251 } else if (order > kGenericMaxBucketedOrder) { | 224 } else if (order > kGenericMaxBucketedOrder) { |
252 *bucketPtr++ = &PartitionRootGeneric::gPagedBucket; | 225 *bucketPtr++ = &PartitionRootGeneric::gPagedBucket; |
253 } else { | 226 } else { |
254 PartitionBucket* validBucket = bucket; | 227 PartitionBucket* validBucket = bucket; |
255 // Skip over invalid buckets. | 228 // Skip over invalid buckets. |
256 while (validBucket->slotSize % kGenericSmallestBucket) | 229 while (validBucket->slotSize % kGenericSmallestBucket) |
257 validBucket++; | 230 validBucket++; |
258 *bucketPtr++ = validBucket; | 231 *bucketPtr++ = validBucket; |
259 bucket++; | 232 bucket++; |
260 } | 233 } |
261 } | 234 } |
262 } | 235 } |
263 ASSERT(bucket == &root->buckets[0] + kGenericNumBuckets); | 236 DCHECK(bucket == &root->buckets[0] + kGenericNumBuckets); |
264 ASSERT(bucketPtr == | 237 DCHECK(bucketPtr == |
265 &root->bucketLookups[0] + | 238 &root->bucketLookups[0] + |
266 ((kBitsPerSizet + 1) * kGenericNumBucketsPerOrder)); | 239 ((kBitsPerSizet + 1) * kGenericNumBucketsPerOrder)); |
267 // And there's one last bucket lookup that will be hit for e.g. malloc(-1), | 240 // And there's one last bucket lookup that will be hit for e.g. malloc(-1), |
268 // which tries to overflow to a non-existant order. | 241 // which tries to overflow to a non-existant order. |
269 *bucketPtr = &PartitionRootGeneric::gPagedBucket; | 242 *bucketPtr = &PartitionRootGeneric::gPagedBucket; |
270 } | 243 } |
271 | 244 |
272 static bool partitionAllocShutdownBucket(PartitionBucket* bucket) { | 245 static bool partitionAllocShutdownBucket(PartitionBucket* bucket) { |
273 // Failure here indicates a memory leak. | 246 // Failure here indicates a memory leak. |
274 bool foundLeak = bucket->numFullPages; | 247 bool foundLeak = bucket->numFullPages; |
275 for (PartitionPage* page = bucket->activePagesHead; page; | 248 for (PartitionPage* page = bucket->activePagesHead; page; |
276 page = page->nextPage) | 249 page = page->nextPage) |
277 foundLeak |= (page->numAllocatedSlots > 0); | 250 foundLeak |= (page->numAllocatedSlots > 0); |
278 return foundLeak; | 251 return foundLeak; |
279 } | 252 } |
280 | 253 |
281 static bool partitionAllocBaseShutdown(PartitionRootBase* root) { | 254 static bool partitionAllocBaseShutdown(PartitionRootBase* root) { |
282 ASSERT(root->initialized); | 255 DCHECK(root->initialized); |
283 root->initialized = false; | 256 root->initialized = false; |
284 | 257 |
285 // Now that we've examined all partition pages in all buckets, it's safe | 258 // Now that we've examined all partition pages in all buckets, it's safe |
286 // to free all our super pages. Since the super page extent entries are | 259 // to free all our super pages. Since the super page extent entries are |
287 // stored in the super pages, we need to be careful not to access them | 260 // stored in the super pages, we need to be careful not to access them |
288 // after we've released the corresponding super page. | 261 // after we've released the corresponding super page. |
289 PartitionSuperPageExtentEntry* entry = root->firstExtent; | 262 PartitionSuperPageExtentEntry* entry = root->firstExtent; |
290 while (entry) { | 263 while (entry) { |
291 PartitionSuperPageExtentEntry* nextEntry = entry->next; | 264 PartitionSuperPageExtentEntry* nextEntry = entry->next; |
292 char* superPage = entry->superPageBase; | 265 char* superPage = entry->superPageBase; |
(...skipping 12 matching lines...) Expand all Loading... |
305 size_t i; | 278 size_t i; |
306 for (i = 0; i < root->numBuckets; ++i) { | 279 for (i = 0; i < root->numBuckets; ++i) { |
307 PartitionBucket* bucket = &root->buckets()[i]; | 280 PartitionBucket* bucket = &root->buckets()[i]; |
308 foundLeak |= partitionAllocShutdownBucket(bucket); | 281 foundLeak |= partitionAllocShutdownBucket(bucket); |
309 } | 282 } |
310 foundLeak |= partitionAllocBaseShutdown(root); | 283 foundLeak |= partitionAllocBaseShutdown(root); |
311 return !foundLeak; | 284 return !foundLeak; |
312 } | 285 } |
313 | 286 |
314 bool partitionAllocGenericShutdown(PartitionRootGeneric* root) { | 287 bool partitionAllocGenericShutdown(PartitionRootGeneric* root) { |
315 SpinLock::Guard guard(root->lock); | 288 subtle::SpinLock::Guard guard(root->lock); |
316 bool foundLeak = false; | 289 bool foundLeak = false; |
317 size_t i; | 290 size_t i; |
318 for (i = 0; i < kGenericNumBuckets; ++i) { | 291 for (i = 0; i < kGenericNumBuckets; ++i) { |
319 PartitionBucket* bucket = &root->buckets[i]; | 292 PartitionBucket* bucket = &root->buckets[i]; |
320 foundLeak |= partitionAllocShutdownBucket(bucket); | 293 foundLeak |= partitionAllocShutdownBucket(bucket); |
321 } | 294 } |
322 foundLeak |= partitionAllocBaseShutdown(root); | 295 foundLeak |= partitionAllocBaseShutdown(root); |
323 return !foundLeak; | 296 return !foundLeak; |
324 } | 297 } |
325 | 298 |
326 #if !CPU(64BIT) | 299 #if !defined(ARCH_CPU_64_BITS) |
327 static NEVER_INLINE void partitionOutOfMemoryWithLotsOfUncommitedPages() { | 300 static NOINLINE void partitionOutOfMemoryWithLotsOfUncommitedPages() { |
328 OOM_CRASH(); | 301 OOM_CRASH(); |
329 } | 302 } |
330 #endif | 303 #endif |
331 | 304 |
332 static NEVER_INLINE void partitionOutOfMemory(const PartitionRootBase* root) { | 305 static NOINLINE void partitionOutOfMemory(const PartitionRootBase* root) { |
333 #if !CPU(64BIT) | 306 #if !defined(ARCH_CPU_64_BITS) |
334 // Check whether this OOM is due to a lot of super pages that are allocated | 307 // Check whether this OOM is due to a lot of super pages that are allocated |
335 // but not committed, probably due to http://crbug.com/421387. | 308 // but not committed, probably due to http://crbug.com/421387. |
336 if (root->totalSizeOfSuperPages + root->totalSizeOfDirectMappedPages - | 309 if (root->totalSizeOfSuperPages + root->totalSizeOfDirectMappedPages - |
337 root->totalSizeOfCommittedPages > | 310 root->totalSizeOfCommittedPages > |
338 kReasonableSizeOfUnusedPages) { | 311 kReasonableSizeOfUnusedPages) { |
339 partitionOutOfMemoryWithLotsOfUncommitedPages(); | 312 partitionOutOfMemoryWithLotsOfUncommitedPages(); |
340 } | 313 } |
341 #endif | 314 #endif |
342 if (PartitionRootBase::gOomHandlingFunction) | 315 if (PartitionRootBase::gOomHandlingFunction) |
343 (*PartitionRootBase::gOomHandlingFunction)(); | 316 (*PartitionRootBase::gOomHandlingFunction)(); |
344 OOM_CRASH(); | 317 OOM_CRASH(); |
345 } | 318 } |
346 | 319 |
347 static NEVER_INLINE void partitionExcessiveAllocationSize() { | 320 static NOINLINE void partitionExcessiveAllocationSize() { |
348 OOM_CRASH(); | 321 OOM_CRASH(); |
349 } | 322 } |
350 | 323 |
351 static NEVER_INLINE void partitionBucketFull() { | 324 static NOINLINE void partitionBucketFull() { |
352 OOM_CRASH(); | 325 OOM_CRASH(); |
353 } | 326 } |
354 | 327 |
355 // partitionPageStateIs* | 328 // partitionPageStateIs* |
356 // Note that it's only valid to call these functions on pages found on one of | 329 // Note that it's only valid to call these functions on pages found on one of |
357 // the page lists. Specifically, you can't call these functions on full pages | 330 // the page lists. Specifically, you can't call these functions on full pages |
358 // that were detached from the active list. | 331 // that were detached from the active list. |
359 static bool ALWAYS_INLINE | 332 static bool ALWAYS_INLINE |
360 partitionPageStateIsActive(const PartitionPage* page) { | 333 partitionPageStateIsActive(const PartitionPage* page) { |
361 ASSERT(page != &PartitionRootGeneric::gSeedPage); | 334 DCHECK(page != &PartitionRootGeneric::gSeedPage); |
362 ASSERT(!page->pageOffset); | 335 DCHECK(!page->pageOffset); |
363 return (page->numAllocatedSlots > 0 && | 336 return (page->numAllocatedSlots > 0 && |
364 (page->freelistHead || page->numUnprovisionedSlots)); | 337 (page->freelistHead || page->numUnprovisionedSlots)); |
365 } | 338 } |
366 | 339 |
367 static bool ALWAYS_INLINE partitionPageStateIsFull(const PartitionPage* page) { | 340 static bool ALWAYS_INLINE partitionPageStateIsFull(const PartitionPage* page) { |
368 ASSERT(page != &PartitionRootGeneric::gSeedPage); | 341 DCHECK(page != &PartitionRootGeneric::gSeedPage); |
369 ASSERT(!page->pageOffset); | 342 DCHECK(!page->pageOffset); |
370 bool ret = (page->numAllocatedSlots == partitionBucketSlots(page->bucket)); | 343 bool ret = (page->numAllocatedSlots == partitionBucketSlots(page->bucket)); |
371 if (ret) { | 344 if (ret) { |
372 ASSERT(!page->freelistHead); | 345 DCHECK(!page->freelistHead); |
373 ASSERT(!page->numUnprovisionedSlots); | 346 DCHECK(!page->numUnprovisionedSlots); |
374 } | 347 } |
375 return ret; | 348 return ret; |
376 } | 349 } |
377 | 350 |
378 static bool ALWAYS_INLINE partitionPageStateIsEmpty(const PartitionPage* page) { | 351 static bool ALWAYS_INLINE partitionPageStateIsEmpty(const PartitionPage* page) { |
379 ASSERT(page != &PartitionRootGeneric::gSeedPage); | 352 DCHECK(page != &PartitionRootGeneric::gSeedPage); |
380 ASSERT(!page->pageOffset); | 353 DCHECK(!page->pageOffset); |
381 return (!page->numAllocatedSlots && page->freelistHead); | 354 return (!page->numAllocatedSlots && page->freelistHead); |
382 } | 355 } |
383 | 356 |
384 static bool ALWAYS_INLINE | 357 static bool ALWAYS_INLINE |
385 partitionPageStateIsDecommitted(const PartitionPage* page) { | 358 partitionPageStateIsDecommitted(const PartitionPage* page) { |
386 ASSERT(page != &PartitionRootGeneric::gSeedPage); | 359 DCHECK(page != &PartitionRootGeneric::gSeedPage); |
387 ASSERT(!page->pageOffset); | 360 DCHECK(!page->pageOffset); |
388 bool ret = (!page->numAllocatedSlots && !page->freelistHead); | 361 bool ret = (!page->numAllocatedSlots && !page->freelistHead); |
389 if (ret) { | 362 if (ret) { |
390 ASSERT(!page->numUnprovisionedSlots); | 363 DCHECK(!page->numUnprovisionedSlots); |
391 ASSERT(page->emptyCacheIndex == -1); | 364 DCHECK(page->emptyCacheIndex == -1); |
392 } | 365 } |
393 return ret; | 366 return ret; |
394 } | 367 } |
395 | 368 |
396 static void partitionIncreaseCommittedPages(PartitionRootBase* root, | 369 static void partitionIncreaseCommittedPages(PartitionRootBase* root, |
397 size_t len) { | 370 size_t len) { |
398 root->totalSizeOfCommittedPages += len; | 371 root->totalSizeOfCommittedPages += len; |
399 ASSERT(root->totalSizeOfCommittedPages <= | 372 DCHECK(root->totalSizeOfCommittedPages <= |
400 root->totalSizeOfSuperPages + root->totalSizeOfDirectMappedPages); | 373 root->totalSizeOfSuperPages + root->totalSizeOfDirectMappedPages); |
401 } | 374 } |
402 | 375 |
403 static void partitionDecreaseCommittedPages(PartitionRootBase* root, | 376 static void partitionDecreaseCommittedPages(PartitionRootBase* root, |
404 size_t len) { | 377 size_t len) { |
405 root->totalSizeOfCommittedPages -= len; | 378 root->totalSizeOfCommittedPages -= len; |
406 ASSERT(root->totalSizeOfCommittedPages <= | 379 DCHECK(root->totalSizeOfCommittedPages <= |
407 root->totalSizeOfSuperPages + root->totalSizeOfDirectMappedPages); | 380 root->totalSizeOfSuperPages + root->totalSizeOfDirectMappedPages); |
408 } | 381 } |
409 | 382 |
410 static ALWAYS_INLINE void partitionDecommitSystemPages(PartitionRootBase* root, | 383 static ALWAYS_INLINE void partitionDecommitSystemPages(PartitionRootBase* root, |
411 void* addr, | 384 void* addr, |
412 size_t len) { | 385 size_t len) { |
413 decommitSystemPages(addr, len); | 386 decommitSystemPages(addr, len); |
414 partitionDecreaseCommittedPages(root, len); | 387 partitionDecreaseCommittedPages(root, len); |
415 } | 388 } |
416 | 389 |
417 static ALWAYS_INLINE void partitionRecommitSystemPages(PartitionRootBase* root, | 390 static ALWAYS_INLINE void partitionRecommitSystemPages(PartitionRootBase* root, |
418 void* addr, | 391 void* addr, |
419 size_t len) { | 392 size_t len) { |
420 recommitSystemPages(addr, len); | 393 recommitSystemPages(addr, len); |
421 partitionIncreaseCommittedPages(root, len); | 394 partitionIncreaseCommittedPages(root, len); |
422 } | 395 } |
423 | 396 |
424 static ALWAYS_INLINE void* partitionAllocPartitionPages( | 397 static ALWAYS_INLINE void* partitionAllocPartitionPages( |
425 PartitionRootBase* root, | 398 PartitionRootBase* root, |
426 int flags, | 399 int flags, |
427 uint16_t numPartitionPages) { | 400 uint16_t numPartitionPages) { |
428 ASSERT(!(reinterpret_cast<uintptr_t>(root->nextPartitionPage) % | 401 DCHECK(!(reinterpret_cast<uintptr_t>(root->nextPartitionPage) % |
429 kPartitionPageSize)); | 402 kPartitionPageSize)); |
430 ASSERT(!(reinterpret_cast<uintptr_t>(root->nextPartitionPageEnd) % | 403 DCHECK(!(reinterpret_cast<uintptr_t>(root->nextPartitionPageEnd) % |
431 kPartitionPageSize)); | 404 kPartitionPageSize)); |
432 ASSERT(numPartitionPages <= kNumPartitionPagesPerSuperPage); | 405 DCHECK(numPartitionPages <= kNumPartitionPagesPerSuperPage); |
433 size_t totalSize = kPartitionPageSize * numPartitionPages; | 406 size_t totalSize = kPartitionPageSize * numPartitionPages; |
434 size_t numPartitionPagesLeft = | 407 size_t numPartitionPagesLeft = |
435 (root->nextPartitionPageEnd - root->nextPartitionPage) >> | 408 (root->nextPartitionPageEnd - root->nextPartitionPage) >> |
436 kPartitionPageShift; | 409 kPartitionPageShift; |
437 if (LIKELY(numPartitionPagesLeft >= numPartitionPages)) { | 410 if (LIKELY(numPartitionPagesLeft >= numPartitionPages)) { |
438 // In this case, we can still hand out pages from the current super page | 411 // In this case, we can still hand out pages from the current super page |
439 // allocation. | 412 // allocation. |
440 char* ret = root->nextPartitionPage; | 413 char* ret = root->nextPartitionPage; |
441 root->nextPartitionPage += totalSize; | 414 root->nextPartitionPage += totalSize; |
442 partitionIncreaseCommittedPages(root, totalSize); | 415 partitionIncreaseCommittedPages(root, totalSize); |
(...skipping 49 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
492 // are unused, but we initialize them to 0 so that we get a clear signal | 465 // are unused, but we initialize them to 0 so that we get a clear signal |
493 // in case they are accidentally used. | 466 // in case they are accidentally used. |
494 latestExtent->superPageBase = 0; | 467 latestExtent->superPageBase = 0; |
495 latestExtent->superPagesEnd = 0; | 468 latestExtent->superPagesEnd = 0; |
496 latestExtent->next = 0; | 469 latestExtent->next = 0; |
497 | 470 |
498 PartitionSuperPageExtentEntry* currentExtent = root->currentExtent; | 471 PartitionSuperPageExtentEntry* currentExtent = root->currentExtent; |
499 bool isNewExtent = (superPage != requestedAddress); | 472 bool isNewExtent = (superPage != requestedAddress); |
500 if (UNLIKELY(isNewExtent)) { | 473 if (UNLIKELY(isNewExtent)) { |
501 if (UNLIKELY(!currentExtent)) { | 474 if (UNLIKELY(!currentExtent)) { |
502 ASSERT(!root->firstExtent); | 475 DCHECK(!root->firstExtent); |
503 root->firstExtent = latestExtent; | 476 root->firstExtent = latestExtent; |
504 } else { | 477 } else { |
505 ASSERT(currentExtent->superPageBase); | 478 DCHECK(currentExtent->superPageBase); |
506 currentExtent->next = latestExtent; | 479 currentExtent->next = latestExtent; |
507 } | 480 } |
508 root->currentExtent = latestExtent; | 481 root->currentExtent = latestExtent; |
509 latestExtent->superPageBase = superPage; | 482 latestExtent->superPageBase = superPage; |
510 latestExtent->superPagesEnd = superPage + kSuperPageSize; | 483 latestExtent->superPagesEnd = superPage + kSuperPageSize; |
511 } else { | 484 } else { |
512 // We allocated next to an existing extent so just nudge the size up a | 485 // We allocated next to an existing extent so just nudge the size up a |
513 // little. | 486 // little. |
514 ASSERT(currentExtent->superPagesEnd); | 487 DCHECK(currentExtent->superPagesEnd); |
515 currentExtent->superPagesEnd += kSuperPageSize; | 488 currentExtent->superPagesEnd += kSuperPageSize; |
516 ASSERT(ret >= currentExtent->superPageBase && | 489 DCHECK(ret >= currentExtent->superPageBase && |
517 ret < currentExtent->superPagesEnd); | 490 ret < currentExtent->superPagesEnd); |
518 } | 491 } |
519 return ret; | 492 return ret; |
520 } | 493 } |
521 | 494 |
522 static ALWAYS_INLINE uint16_t | 495 static ALWAYS_INLINE uint16_t |
523 partitionBucketPartitionPages(const PartitionBucket* bucket) { | 496 partitionBucketPartitionPages(const PartitionBucket* bucket) { |
524 return (bucket->numSystemPagesPerSlotSpan + | 497 return (bucket->numSystemPagesPerSlotSpan + |
525 (kNumSystemPagesPerPartitionPage - 1)) / | 498 (kNumSystemPagesPerPartitionPage - 1)) / |
526 kNumSystemPagesPerPartitionPage; | 499 kNumSystemPagesPerPartitionPage; |
527 } | 500 } |
528 | 501 |
529 static ALWAYS_INLINE void partitionPageReset(PartitionPage* page) { | 502 static ALWAYS_INLINE void partitionPageReset(PartitionPage* page) { |
530 ASSERT(partitionPageStateIsDecommitted(page)); | 503 DCHECK(partitionPageStateIsDecommitted(page)); |
531 | 504 |
532 page->numUnprovisionedSlots = partitionBucketSlots(page->bucket); | 505 page->numUnprovisionedSlots = partitionBucketSlots(page->bucket); |
533 ASSERT(page->numUnprovisionedSlots); | 506 DCHECK(page->numUnprovisionedSlots); |
534 | 507 |
535 page->nextPage = nullptr; | 508 page->nextPage = nullptr; |
536 } | 509 } |
537 | 510 |
538 static ALWAYS_INLINE void partitionPageSetup(PartitionPage* page, | 511 static ALWAYS_INLINE void partitionPageSetup(PartitionPage* page, |
539 PartitionBucket* bucket) { | 512 PartitionBucket* bucket) { |
540 // The bucket never changes. We set it up once. | 513 // The bucket never changes. We set it up once. |
541 page->bucket = bucket; | 514 page->bucket = bucket; |
542 page->emptyCacheIndex = -1; | 515 page->emptyCacheIndex = -1; |
543 | 516 |
(...skipping 10 matching lines...) Expand all Loading... |
554 for (uint16_t i = 1; i < numPartitionPages; ++i) { | 527 for (uint16_t i = 1; i < numPartitionPages; ++i) { |
555 pageCharPtr += kPageMetadataSize; | 528 pageCharPtr += kPageMetadataSize; |
556 PartitionPage* secondaryPage = | 529 PartitionPage* secondaryPage = |
557 reinterpret_cast<PartitionPage*>(pageCharPtr); | 530 reinterpret_cast<PartitionPage*>(pageCharPtr); |
558 secondaryPage->pageOffset = i; | 531 secondaryPage->pageOffset = i; |
559 } | 532 } |
560 } | 533 } |
561 | 534 |
562 static ALWAYS_INLINE char* partitionPageAllocAndFillFreelist( | 535 static ALWAYS_INLINE char* partitionPageAllocAndFillFreelist( |
563 PartitionPage* page) { | 536 PartitionPage* page) { |
564 ASSERT(page != &PartitionRootGeneric::gSeedPage); | 537 DCHECK(page != &PartitionRootGeneric::gSeedPage); |
565 uint16_t numSlots = page->numUnprovisionedSlots; | 538 uint16_t numSlots = page->numUnprovisionedSlots; |
566 ASSERT(numSlots); | 539 DCHECK(numSlots); |
567 PartitionBucket* bucket = page->bucket; | 540 PartitionBucket* bucket = page->bucket; |
568 // We should only get here when _every_ slot is either used or unprovisioned. | 541 // We should only get here when _every_ slot is either used or unprovisioned. |
569 // (The third state is "on the freelist". If we have a non-empty freelist, we | 542 // (The third state is "on the freelist". If we have a non-empty freelist, we |
570 // should not get here.) | 543 // should not get here.) |
571 ASSERT(numSlots + page->numAllocatedSlots == partitionBucketSlots(bucket)); | 544 DCHECK(numSlots + page->numAllocatedSlots == partitionBucketSlots(bucket)); |
572 // Similarly, make explicitly sure that the freelist is empty. | 545 // Similarly, make explicitly sure that the freelist is empty. |
573 ASSERT(!page->freelistHead); | 546 DCHECK(!page->freelistHead); |
574 ASSERT(page->numAllocatedSlots >= 0); | 547 DCHECK(page->numAllocatedSlots >= 0); |
575 | 548 |
576 size_t size = bucket->slotSize; | 549 size_t size = bucket->slotSize; |
577 char* base = reinterpret_cast<char*>(partitionPageToPointer(page)); | 550 char* base = reinterpret_cast<char*>(partitionPageToPointer(page)); |
578 char* returnObject = base + (size * page->numAllocatedSlots); | 551 char* returnObject = base + (size * page->numAllocatedSlots); |
579 char* firstFreelistPointer = returnObject + size; | 552 char* firstFreelistPointer = returnObject + size; |
580 char* firstFreelistPointerExtent = | 553 char* firstFreelistPointerExtent = |
581 firstFreelistPointer + sizeof(PartitionFreelistEntry*); | 554 firstFreelistPointer + sizeof(PartitionFreelistEntry*); |
582 // Our goal is to fault as few system pages as possible. We calculate the | 555 // Our goal is to fault as few system pages as possible. We calculate the |
583 // page containing the "end" of the returned slot, and then allow freelist | 556 // page containing the "end" of the returned slot, and then allow freelist |
584 // pointers to be written up to the end of that page. | 557 // pointers to be written up to the end of that page. |
585 char* subPageLimit = reinterpret_cast<char*>( | 558 char* subPageLimit = reinterpret_cast<char*>( |
586 WTF::roundUpToSystemPage(reinterpret_cast<size_t>(firstFreelistPointer))); | 559 roundUpToSystemPage(reinterpret_cast<size_t>(firstFreelistPointer))); |
587 char* slotsLimit = returnObject + (size * numSlots); | 560 char* slotsLimit = returnObject + (size * numSlots); |
588 char* freelistLimit = subPageLimit; | 561 char* freelistLimit = subPageLimit; |
589 if (UNLIKELY(slotsLimit < freelistLimit)) | 562 if (UNLIKELY(slotsLimit < freelistLimit)) |
590 freelistLimit = slotsLimit; | 563 freelistLimit = slotsLimit; |
591 | 564 |
592 uint16_t numNewFreelistEntries = 0; | 565 uint16_t numNewFreelistEntries = 0; |
593 if (LIKELY(firstFreelistPointerExtent <= freelistLimit)) { | 566 if (LIKELY(firstFreelistPointerExtent <= freelistLimit)) { |
594 // Only consider used space in the slot span. If we consider wasted | 567 // Only consider used space in the slot span. If we consider wasted |
595 // space, we may get an off-by-one when a freelist pointer fits in the | 568 // space, we may get an off-by-one when a freelist pointer fits in the |
596 // wasted space, but a slot does not. | 569 // wasted space, but a slot does not. |
597 // We know we can fit at least one freelist pointer. | 570 // We know we can fit at least one freelist pointer. |
598 numNewFreelistEntries = 1; | 571 numNewFreelistEntries = 1; |
599 // Any further entries require space for the whole slot span. | 572 // Any further entries require space for the whole slot span. |
600 numNewFreelistEntries += static_cast<uint16_t>( | 573 numNewFreelistEntries += static_cast<uint16_t>( |
601 (freelistLimit - firstFreelistPointerExtent) / size); | 574 (freelistLimit - firstFreelistPointerExtent) / size); |
602 } | 575 } |
603 | 576 |
604 // We always return an object slot -- that's the +1 below. | 577 // We always return an object slot -- that's the +1 below. |
605 // We do not neccessarily create any new freelist entries, because we cross | 578 // We do not neccessarily create any new freelist entries, because we cross |
606 // sub page boundaries frequently for large bucket sizes. | 579 // sub page boundaries frequently for large bucket sizes. |
607 ASSERT(numNewFreelistEntries + 1 <= numSlots); | 580 DCHECK(numNewFreelistEntries + 1 <= numSlots); |
608 numSlots -= (numNewFreelistEntries + 1); | 581 numSlots -= (numNewFreelistEntries + 1); |
609 page->numUnprovisionedSlots = numSlots; | 582 page->numUnprovisionedSlots = numSlots; |
610 page->numAllocatedSlots++; | 583 page->numAllocatedSlots++; |
611 | 584 |
612 if (LIKELY(numNewFreelistEntries)) { | 585 if (LIKELY(numNewFreelistEntries)) { |
613 char* freelistPointer = firstFreelistPointer; | 586 char* freelistPointer = firstFreelistPointer; |
614 PartitionFreelistEntry* entry = | 587 PartitionFreelistEntry* entry = |
615 reinterpret_cast<PartitionFreelistEntry*>(freelistPointer); | 588 reinterpret_cast<PartitionFreelistEntry*>(freelistPointer); |
616 page->freelistHead = entry; | 589 page->freelistHead = entry; |
617 while (--numNewFreelistEntries) { | 590 while (--numNewFreelistEntries) { |
(...skipping 20 matching lines...) Expand all Loading... |
638 // decommitted page list and full pages are unlinked from any list. | 611 // decommitted page list and full pages are unlinked from any list. |
639 static bool partitionSetNewActivePage(PartitionBucket* bucket) { | 612 static bool partitionSetNewActivePage(PartitionBucket* bucket) { |
640 PartitionPage* page = bucket->activePagesHead; | 613 PartitionPage* page = bucket->activePagesHead; |
641 if (page == &PartitionRootBase::gSeedPage) | 614 if (page == &PartitionRootBase::gSeedPage) |
642 return false; | 615 return false; |
643 | 616 |
644 PartitionPage* nextPage; | 617 PartitionPage* nextPage; |
645 | 618 |
646 for (; page; page = nextPage) { | 619 for (; page; page = nextPage) { |
647 nextPage = page->nextPage; | 620 nextPage = page->nextPage; |
648 ASSERT(page->bucket == bucket); | 621 DCHECK(page->bucket == bucket); |
649 ASSERT(page != bucket->emptyPagesHead); | 622 DCHECK(page != bucket->emptyPagesHead); |
650 ASSERT(page != bucket->decommittedPagesHead); | 623 DCHECK(page != bucket->decommittedPagesHead); |
651 | 624 |
652 // Deal with empty and decommitted pages. | 625 // Deal with empty and decommitted pages. |
653 if (LIKELY(partitionPageStateIsActive(page))) { | 626 if (LIKELY(partitionPageStateIsActive(page))) { |
654 // This page is usable because it has freelist entries, or has | 627 // This page is usable because it has freelist entries, or has |
655 // unprovisioned slots we can create freelist entries from. | 628 // unprovisioned slots we can create freelist entries from. |
656 bucket->activePagesHead = page; | 629 bucket->activePagesHead = page; |
657 return true; | 630 return true; |
658 } | 631 } |
659 if (LIKELY(partitionPageStateIsEmpty(page))) { | 632 if (LIKELY(partitionPageStateIsEmpty(page))) { |
660 page->nextPage = bucket->emptyPagesHead; | 633 page->nextPage = bucket->emptyPagesHead; |
661 bucket->emptyPagesHead = page; | 634 bucket->emptyPagesHead = page; |
662 } else if (LIKELY(partitionPageStateIsDecommitted(page))) { | 635 } else if (LIKELY(partitionPageStateIsDecommitted(page))) { |
663 page->nextPage = bucket->decommittedPagesHead; | 636 page->nextPage = bucket->decommittedPagesHead; |
664 bucket->decommittedPagesHead = page; | 637 bucket->decommittedPagesHead = page; |
665 } else { | 638 } else { |
666 ASSERT(partitionPageStateIsFull(page)); | 639 DCHECK(partitionPageStateIsFull(page)); |
667 // If we get here, we found a full page. Skip over it too, and also | 640 // If we get here, we found a full page. Skip over it too, and also |
668 // tag it as full (via a negative value). We need it tagged so that | 641 // tag it as full (via a negative value). We need it tagged so that |
669 // free'ing can tell, and move it back into the active page list. | 642 // free'ing can tell, and move it back into the active page list. |
670 page->numAllocatedSlots = -page->numAllocatedSlots; | 643 page->numAllocatedSlots = -page->numAllocatedSlots; |
671 ++bucket->numFullPages; | 644 ++bucket->numFullPages; |
672 // numFullPages is a uint16_t for efficient packing so guard against | 645 // numFullPages is a uint16_t for efficient packing so guard against |
673 // overflow to be safe. | 646 // overflow to be safe. |
674 if (UNLIKELY(!bucket->numFullPages)) | 647 if (UNLIKELY(!bucket->numFullPages)) |
675 partitionBucketFull(); | 648 partitionBucketFull(); |
676 // Not necessary but might help stop accidents. | 649 // Not necessary but might help stop accidents. |
677 page->nextPage = 0; | 650 page->nextPage = 0; |
678 } | 651 } |
679 } | 652 } |
680 | 653 |
681 bucket->activePagesHead = &PartitionRootGeneric::gSeedPage; | 654 bucket->activePagesHead = &PartitionRootGeneric::gSeedPage; |
682 return false; | 655 return false; |
683 } | 656 } |
684 | 657 |
685 static ALWAYS_INLINE PartitionDirectMapExtent* partitionPageToDirectMapExtent( | 658 static ALWAYS_INLINE PartitionDirectMapExtent* partitionPageToDirectMapExtent( |
686 PartitionPage* page) { | 659 PartitionPage* page) { |
687 ASSERT(partitionBucketIsDirectMapped(page->bucket)); | 660 DCHECK(partitionBucketIsDirectMapped(page->bucket)); |
688 return reinterpret_cast<PartitionDirectMapExtent*>( | 661 return reinterpret_cast<PartitionDirectMapExtent*>( |
689 reinterpret_cast<char*>(page) + 3 * kPageMetadataSize); | 662 reinterpret_cast<char*>(page) + 3 * kPageMetadataSize); |
690 } | 663 } |
691 | 664 |
692 static ALWAYS_INLINE void partitionPageSetRawSize(PartitionPage* page, | 665 static ALWAYS_INLINE void partitionPageSetRawSize(PartitionPage* page, |
693 size_t size) { | 666 size_t size) { |
694 size_t* rawSizePtr = partitionPageGetRawSizePtr(page); | 667 size_t* rawSizePtr = partitionPageGetRawSizePtr(page); |
695 if (UNLIKELY(rawSizePtr != nullptr)) | 668 if (UNLIKELY(rawSizePtr != nullptr)) |
696 *rawSizePtr = size; | 669 *rawSizePtr = size; |
697 } | 670 } |
698 | 671 |
// Allocates a dedicated mapping for an allocation too large for normal
// buckets, laid out to mimic a super page so that the usual metadata
// lookups work. Returns the metadata PartitionPage for the mapping, or
// nullptr if the underlying page allocation fails.
// NOTE(review): |flags| is accepted but not consulted anywhere in this
// body — presumably kept for API symmetry with the bucketed path; confirm.
static ALWAYS_INLINE PartitionPage* partitionDirectMap(PartitionRootBase* root,
                                                       int flags,
                                                       size_t rawSize) {
  size_t size = partitionDirectMapSize(rawSize);

  // Because we need to fake looking like a super page, we need to allocate
  // a bunch of system pages more than "size":
  // - The first few system pages are the partition page in which the super
  // page metadata is stored. We fault just one system page out of a partition
  // page sized clump.
  // - We add a trailing guard page on 32-bit (on 64-bit we rely on the
  // massive address space plus randomization instead).
  size_t mapSize = size + kPartitionPageSize;
#if !defined(ARCH_CPU_64_BITS)
  mapSize += kSystemPageSize;
#endif
  // Round up to the allocation granularity.
  mapSize += kPageAllocationGranularityOffsetMask;
  mapSize &= kPageAllocationGranularityBaseMask;

  // TODO: these pages will be zero-filled. Consider internalizing an
  // allocZeroed() API so we can avoid a memset() entirely in this case.
  char* ptr = reinterpret_cast<char*>(
      allocPages(0, mapSize, kSuperPageSize, PageAccessible));
  if (UNLIKELY(!ptr))
    return nullptr;

  // Account for the committed footprint: the slot itself plus the single
  // faulted metadata system page.
  size_t committedPageSize = size + kSystemPageSize;
  root->totalSizeOfDirectMappedPages += committedPageSize;
  partitionIncreaseCommittedPages(root, committedPageSize);

  // The user-visible slot starts one partition page into the mapping; the
  // metadata partition page keeps only its first two system pages usable
  // and the remainder inaccessible.
  char* slot = ptr + kPartitionPageSize;
  setSystemPagesInaccessible(ptr + (kSystemPageSize * 2),
                             kPartitionPageSize - (kSystemPageSize * 2));
#if !defined(ARCH_CPU_64_BITS)
  // Leading and trailing guard pages (32-bit only; see comment above).
  setSystemPagesInaccessible(ptr, kSystemPageSize);
  setSystemPagesInaccessible(slot + size, kSystemPageSize);
#endif

  PartitionSuperPageExtentEntry* extent =
      reinterpret_cast<PartitionSuperPageExtentEntry*>(
          partitionSuperPageToMetadataArea(ptr));
  extent->root = root;
  // The new structures are all located inside a fresh system page so they
  // will all be zeroed out. These DCHECKs are for documentation.
  DCHECK(!extent->superPageBase);
  DCHECK(!extent->superPagesEnd);
  DCHECK(!extent->next);
  PartitionPage* page = partitionPointerToPageNoAlignmentCheck(slot);
  // A fake one-off bucket is carved out of the metadata area, two metadata
  // slots past the page entry.
  PartitionBucket* bucket = reinterpret_cast<PartitionBucket*>(
      reinterpret_cast<char*>(page) + (kPageMetadataSize * 2));
  DCHECK(!page->nextPage);
  DCHECK(!page->numAllocatedSlots);
  DCHECK(!page->numUnprovisionedSlots);
  DCHECK(!page->pageOffset);
  DCHECK(!page->emptyCacheIndex);
  page->bucket = bucket;
  // Single-entry freelist: the slot itself, with a masked null terminator.
  page->freelistHead = reinterpret_cast<PartitionFreelistEntry*>(slot);
  PartitionFreelistEntry* nextEntry =
      reinterpret_cast<PartitionFreelistEntry*>(slot);
  nextEntry->next = partitionFreelistMask(0);

  DCHECK(!bucket->activePagesHead);
  DCHECK(!bucket->emptyPagesHead);
  DCHECK(!bucket->decommittedPagesHead);
  DCHECK(!bucket->numSystemPagesPerSlotSpan);
  DCHECK(!bucket->numFullPages);
  bucket->slotSize = size;

  PartitionDirectMapExtent* mapExtent = partitionPageToDirectMapExtent(page);
  // mapSize excludes the leading metadata partition page and the trailing
  // guard system page; partitionDirectUnmap() adds them back on free.
  mapExtent->mapSize = mapSize - kPartitionPageSize - kSystemPageSize;
  mapExtent->bucket = bucket;

  // Maintain the doubly-linked list of all direct mappings.
  mapExtent->nextExtent = root->directMapList;
  if (mapExtent->nextExtent)
    mapExtent->nextExtent->prevExtent = mapExtent;
  mapExtent->prevExtent = nullptr;
  root->directMapList = mapExtent;

  return page;
}
781 | 754 |
// Releases a direct-mapped allocation: unlinks its extent from the root's
// direct-map list, updates commit accounting, and unmaps the whole region
// including the metadata partition page and guard page.
static ALWAYS_INLINE void partitionDirectUnmap(PartitionPage* page) {
  PartitionRootBase* root = partitionPageToRoot(page);
  const PartitionDirectMapExtent* extent = partitionPageToDirectMapExtent(page);
  size_t unmapSize = extent->mapSize;

  // Maintain the doubly-linked list of all direct mappings.
  if (extent->prevExtent) {
    DCHECK(extent->prevExtent->nextExtent == extent);
    extent->prevExtent->nextExtent = extent->nextExtent;
  } else {
    // No predecessor: this extent was the list head.
    root->directMapList = extent->nextExtent;
  }
  if (extent->nextExtent) {
    DCHECK(extent->nextExtent->prevExtent == extent);
    extent->nextExtent->prevExtent = extent->prevExtent;
  }

  // Add on the size of the trailing guard page and preceeding partition
  // page. (mapSize was stored net of these in partitionDirectMap().)
  unmapSize += kPartitionPageSize + kSystemPageSize;

  // Mirror the commit accounting done at map time: the slot plus one
  // metadata system page.
  size_t uncommittedPageSize = page->bucket->slotSize + kSystemPageSize;
  partitionDecreaseCommittedPages(root, uncommittedPageSize);
  DCHECK(root->totalSizeOfDirectMappedPages >= uncommittedPageSize);
  root->totalSizeOfDirectMappedPages -= uncommittedPageSize;

  DCHECK(!(unmapSize & kPageAllocationGranularityOffsetMask));

  char* ptr = reinterpret_cast<char*>(partitionPageToPointer(page));
  // Account for the mapping starting a partition page before the actual
  // allocation address.
  ptr -= kPartitionPageSize;

  freePages(ptr, unmapSize);
}
817 | 790 |
// Slow-path allocation, reached when the bucket's active page has no
// freelist entries. Tries, in order: direct mapping (oversized buckets),
// another page from the active list, an empty/decommitted page, and
// finally a brand-new super-page-backed page. Returns a raw slot pointer,
// or nullptr only when |flags| has PartitionAllocReturnNull set and memory
// could not be obtained.
void* partitionAllocSlowPath(PartitionRootBase* root,
                             int flags,
                             size_t size,
                             PartitionBucket* bucket) {
  // The slow path is called when the freelist is empty.
  DCHECK(!bucket->activePagesHead->freelistHead);

  PartitionPage* newPage = nullptr;

  // For the partitionAllocGeneric API, we have a bunch of buckets marked
  // as special cases. We bounce them through to the slow path so that we
  // can still have a blazing fast hot path due to lack of corner-case
  // branches.
  bool returnNull = flags & PartitionAllocReturnNull;
  if (UNLIKELY(partitionBucketIsDirectMapped(bucket))) {
    DCHECK(size > kGenericMaxBucketed);
    DCHECK(bucket == &PartitionRootBase::gPagedBucket);
    DCHECK(bucket->activePagesHead == &PartitionRootGeneric::gSeedPage);
    if (size > kGenericMaxDirectMapped) {
      if (returnNull)
        return nullptr;
      // NOTE(review): presumably does not return (aborts) — confirm; the
      // code below would otherwise run with newPage still null.
      partitionExcessiveAllocationSize();
    }
    newPage = partitionDirectMap(root, flags, size);
  } else if (LIKELY(partitionSetNewActivePage(bucket))) {
    // First, did we find an active page in the active pages list?
    newPage = bucket->activePagesHead;
    DCHECK(partitionPageStateIsActive(newPage));
  } else if (LIKELY(bucket->emptyPagesHead != nullptr) ||
             LIKELY(bucket->decommittedPagesHead != nullptr)) {
    // Second, look in our lists of empty and decommitted pages.
    // Check empty pages first, which are preferred, but beware that an
    // empty page might have been decommitted.
    while (LIKELY((newPage = bucket->emptyPagesHead) != nullptr)) {
      DCHECK(newPage->bucket == bucket);
      DCHECK(partitionPageStateIsEmpty(newPage) ||
             partitionPageStateIsDecommitted(newPage));
      bucket->emptyPagesHead = newPage->nextPage;
      // Accept the empty page unless it got decommitted.
      if (newPage->freelistHead) {
        newPage->nextPage = nullptr;
        break;
      }
      // It was decommitted after all; file it on the decommitted list
      // and keep scanning.
      DCHECK(partitionPageStateIsDecommitted(newPage));
      newPage->nextPage = bucket->decommittedPagesHead;
      bucket->decommittedPagesHead = newPage;
    }
    if (UNLIKELY(!newPage) && LIKELY(bucket->decommittedPagesHead != nullptr)) {
      // No usable empty page: recommit the first decommitted one.
      newPage = bucket->decommittedPagesHead;
      DCHECK(newPage->bucket == bucket);
      DCHECK(partitionPageStateIsDecommitted(newPage));
      bucket->decommittedPagesHead = newPage->nextPage;
      void* addr = partitionPageToPointer(newPage);
      partitionRecommitSystemPages(root, addr,
                                   partitionBucketBytes(newPage->bucket));
      partitionPageReset(newPage);
    }
    DCHECK(newPage);
  } else {
    // Third. If we get here, we need a brand new page.
    uint16_t numPartitionPages = partitionBucketPartitionPages(bucket);
    void* rawPages =
        partitionAllocPartitionPages(root, flags, numPartitionPages);
    if (LIKELY(rawPages != nullptr)) {
      newPage = partitionPointerToPageNoAlignmentCheck(rawPages);
      partitionPageSetup(newPage, bucket);
    }
  }

  // Bail if we had a memory allocation failure.
  if (UNLIKELY(!newPage)) {
    DCHECK(bucket->activePagesHead == &PartitionRootGeneric::gSeedPage);
    if (returnNull)
      return nullptr;
    // NOTE(review): presumably does not return — confirm; otherwise the
    // newPage dereference below would be a null deref.
    partitionOutOfMemory(root);
  }

  // Re-read the bucket: for direct maps, newPage carries a fake one-off
  // bucket distinct from the |bucket| parameter.
  bucket = newPage->bucket;
  DCHECK(bucket != &PartitionRootBase::gPagedBucket);
  bucket->activePagesHead = newPage;
  partitionPageSetRawSize(newPage, size);

  // If we found an active page with free slots, or an empty page, we have a
  // usable freelist head.
  if (LIKELY(newPage->freelistHead != nullptr)) {
    PartitionFreelistEntry* entry = newPage->freelistHead;
    PartitionFreelistEntry* newHead = partitionFreelistMask(entry->next);
    newPage->freelistHead = newHead;
    newPage->numAllocatedSlots++;
    return entry;
  }
  // Otherwise, we need to build the freelist.
  DCHECK(newPage->numUnprovisionedSlots);
  return partitionPageAllocAndFillFreelist(newPage);
}
913 | 886 |
914 static ALWAYS_INLINE void partitionDecommitPage(PartitionRootBase* root, | 887 static ALWAYS_INLINE void partitionDecommitPage(PartitionRootBase* root, |
915 PartitionPage* page) { | 888 PartitionPage* page) { |
916 ASSERT(partitionPageStateIsEmpty(page)); | 889 DCHECK(partitionPageStateIsEmpty(page)); |
917 ASSERT(!partitionBucketIsDirectMapped(page->bucket)); | 890 DCHECK(!partitionBucketIsDirectMapped(page->bucket)); |
918 void* addr = partitionPageToPointer(page); | 891 void* addr = partitionPageToPointer(page); |
919 partitionDecommitSystemPages(root, addr, partitionBucketBytes(page->bucket)); | 892 partitionDecommitSystemPages(root, addr, partitionBucketBytes(page->bucket)); |
920 | 893 |
921 // We actually leave the decommitted page in the active list. We'll sweep | 894 // We actually leave the decommitted page in the active list. We'll sweep |
922 // it on to the decommitted page list when we next walk the active page | 895 // it on to the decommitted page list when we next walk the active page |
923 // list. | 896 // list. |
924 // Pulling this trick enables us to use a singly-linked page list for all | 897 // Pulling this trick enables us to use a singly-linked page list for all |
925 // cases, which is critical in keeping the page metadata structure down to | 898 // cases, which is critical in keeping the page metadata structure down to |
926 // 32 bytes in size. | 899 // 32 bytes in size. |
927 page->freelistHead = 0; | 900 page->freelistHead = 0; |
928 page->numUnprovisionedSlots = 0; | 901 page->numUnprovisionedSlots = 0; |
929 ASSERT(partitionPageStateIsDecommitted(page)); | 902 DCHECK(partitionPageStateIsDecommitted(page)); |
930 } | 903 } |
931 | 904 |
932 static void partitionDecommitPageIfPossible(PartitionRootBase* root, | 905 static void partitionDecommitPageIfPossible(PartitionRootBase* root, |
933 PartitionPage* page) { | 906 PartitionPage* page) { |
934 ASSERT(page->emptyCacheIndex >= 0); | 907 DCHECK(page->emptyCacheIndex >= 0); |
935 ASSERT(static_cast<unsigned>(page->emptyCacheIndex) < kMaxFreeableSpans); | 908 DCHECK(static_cast<unsigned>(page->emptyCacheIndex) < kMaxFreeableSpans); |
936 ASSERT(page == root->globalEmptyPageRing[page->emptyCacheIndex]); | 909 DCHECK(page == root->globalEmptyPageRing[page->emptyCacheIndex]); |
937 page->emptyCacheIndex = -1; | 910 page->emptyCacheIndex = -1; |
938 if (partitionPageStateIsEmpty(page)) | 911 if (partitionPageStateIsEmpty(page)) |
939 partitionDecommitPage(root, page); | 912 partitionDecommitPage(root, page); |
940 } | 913 } |
941 | 914 |
942 static ALWAYS_INLINE void partitionRegisterEmptyPage(PartitionPage* page) { | 915 static ALWAYS_INLINE void partitionRegisterEmptyPage(PartitionPage* page) { |
943 ASSERT(partitionPageStateIsEmpty(page)); | 916 DCHECK(partitionPageStateIsEmpty(page)); |
944 PartitionRootBase* root = partitionPageToRoot(page); | 917 PartitionRootBase* root = partitionPageToRoot(page); |
945 | 918 |
946 // If the page is already registered as empty, give it another life. | 919 // If the page is already registered as empty, give it another life. |
947 if (page->emptyCacheIndex != -1) { | 920 if (page->emptyCacheIndex != -1) { |
948 ASSERT(page->emptyCacheIndex >= 0); | 921 DCHECK(page->emptyCacheIndex >= 0); |
949 ASSERT(static_cast<unsigned>(page->emptyCacheIndex) < kMaxFreeableSpans); | 922 DCHECK(static_cast<unsigned>(page->emptyCacheIndex) < kMaxFreeableSpans); |
950 ASSERT(root->globalEmptyPageRing[page->emptyCacheIndex] == page); | 923 DCHECK(root->globalEmptyPageRing[page->emptyCacheIndex] == page); |
951 root->globalEmptyPageRing[page->emptyCacheIndex] = 0; | 924 root->globalEmptyPageRing[page->emptyCacheIndex] = 0; |
952 } | 925 } |
953 | 926 |
954 int16_t currentIndex = root->globalEmptyPageRingIndex; | 927 int16_t currentIndex = root->globalEmptyPageRingIndex; |
955 PartitionPage* pageToDecommit = root->globalEmptyPageRing[currentIndex]; | 928 PartitionPage* pageToDecommit = root->globalEmptyPageRing[currentIndex]; |
956 // The page might well have been re-activated, filled up, etc. before we get | 929 // The page might well have been re-activated, filled up, etc. before we get |
957 // around to looking at it here. | 930 // around to looking at it here. |
958 if (pageToDecommit) | 931 if (pageToDecommit) |
959 partitionDecommitPageIfPossible(root, pageToDecommit); | 932 partitionDecommitPageIfPossible(root, pageToDecommit); |
960 | 933 |
(...skipping 13 matching lines...) Expand all Loading... |
974 for (size_t i = 0; i < kMaxFreeableSpans; ++i) { | 947 for (size_t i = 0; i < kMaxFreeableSpans; ++i) { |
975 PartitionPage* page = root->globalEmptyPageRing[i]; | 948 PartitionPage* page = root->globalEmptyPageRing[i]; |
976 if (page) | 949 if (page) |
977 partitionDecommitPageIfPossible(root, page); | 950 partitionDecommitPageIfPossible(root, page); |
978 root->globalEmptyPageRing[i] = nullptr; | 951 root->globalEmptyPageRing[i] = nullptr; |
979 } | 952 } |
980 } | 953 } |
981 | 954 |
982 void partitionFreeSlowPath(PartitionPage* page) { | 955 void partitionFreeSlowPath(PartitionPage* page) { |
983 PartitionBucket* bucket = page->bucket; | 956 PartitionBucket* bucket = page->bucket; |
984 ASSERT(page != &PartitionRootGeneric::gSeedPage); | 957 DCHECK(page != &PartitionRootGeneric::gSeedPage); |
985 if (LIKELY(page->numAllocatedSlots == 0)) { | 958 if (LIKELY(page->numAllocatedSlots == 0)) { |
986 // Page became fully unused. | 959 // Page became fully unused. |
987 if (UNLIKELY(partitionBucketIsDirectMapped(bucket))) { | 960 if (UNLIKELY(partitionBucketIsDirectMapped(bucket))) { |
988 partitionDirectUnmap(page); | 961 partitionDirectUnmap(page); |
989 return; | 962 return; |
990 } | 963 } |
991 // If it's the current active page, change it. We bounce the page to | 964 // If it's the current active page, change it. We bounce the page to |
992 // the empty list as a force towards defragmentation. | 965 // the empty list as a force towards defragmentation. |
993 if (LIKELY(page == bucket->activePagesHead)) | 966 if (LIKELY(page == bucket->activePagesHead)) |
994 (void)partitionSetNewActivePage(bucket); | 967 (void)partitionSetNewActivePage(bucket); |
995 ASSERT(bucket->activePagesHead != page); | 968 DCHECK(bucket->activePagesHead != page); |
996 | 969 |
997 partitionPageSetRawSize(page, 0); | 970 partitionPageSetRawSize(page, 0); |
998 ASSERT(!partitionPageGetRawSize(page)); | 971 DCHECK(!partitionPageGetRawSize(page)); |
999 | 972 |
1000 partitionRegisterEmptyPage(page); | 973 partitionRegisterEmptyPage(page); |
1001 } else { | 974 } else { |
1002 ASSERT(!partitionBucketIsDirectMapped(bucket)); | 975 DCHECK(!partitionBucketIsDirectMapped(bucket)); |
1003 // Ensure that the page is full. That's the only valid case if we | 976 // Ensure that the page is full. That's the only valid case if we |
1004 // arrive here. | 977 // arrive here. |
1005 ASSERT(page->numAllocatedSlots < 0); | 978 DCHECK(page->numAllocatedSlots < 0); |
1006 // A transition of numAllocatedSlots from 0 to -1 is not legal, and | 979 // A transition of numAllocatedSlots from 0 to -1 is not legal, and |
1007 // likely indicates a double-free. | 980 // likely indicates a double-free. |
1008 SECURITY_CHECK(page->numAllocatedSlots != -1); | 981 CHECK(page->numAllocatedSlots != -1); |
1009 page->numAllocatedSlots = -page->numAllocatedSlots - 2; | 982 page->numAllocatedSlots = -page->numAllocatedSlots - 2; |
1010 ASSERT(page->numAllocatedSlots == partitionBucketSlots(bucket) - 1); | 983 DCHECK(page->numAllocatedSlots == partitionBucketSlots(bucket) - 1); |
1011 // Fully used page became partially used. It must be put back on the | 984 // Fully used page became partially used. It must be put back on the |
1012 // non-full page list. Also make it the current page to increase the | 985 // non-full page list. Also make it the current page to increase the |
1013 // chances of it being filled up again. The old current page will be | 986 // chances of it being filled up again. The old current page will be |
1014 // the next page. | 987 // the next page. |
1015 ASSERT(!page->nextPage); | 988 DCHECK(!page->nextPage); |
1016 if (LIKELY(bucket->activePagesHead != &PartitionRootGeneric::gSeedPage)) | 989 if (LIKELY(bucket->activePagesHead != &PartitionRootGeneric::gSeedPage)) |
1017 page->nextPage = bucket->activePagesHead; | 990 page->nextPage = bucket->activePagesHead; |
1018 bucket->activePagesHead = page; | 991 bucket->activePagesHead = page; |
1019 --bucket->numFullPages; | 992 --bucket->numFullPages; |
1020 // Special case: for a partition page with just a single slot, it may | 993 // Special case: for a partition page with just a single slot, it may |
1021 // now be empty and we want to run it through the empty logic. | 994 // now be empty and we want to run it through the empty logic. |
1022 if (UNLIKELY(page->numAllocatedSlots == 0)) | 995 if (UNLIKELY(page->numAllocatedSlots == 0)) |
1023 partitionFreeSlowPath(page); | 996 partitionFreeSlowPath(page); |
1024 } | 997 } |
1025 } | 998 } |
1026 | 999 |
1027 bool partitionReallocDirectMappedInPlace(PartitionRootGeneric* root, | 1000 bool partitionReallocDirectMappedInPlace(PartitionRootGeneric* root, |
1028 PartitionPage* page, | 1001 PartitionPage* page, |
1029 size_t rawSize) { | 1002 size_t rawSize) { |
1030 ASSERT(partitionBucketIsDirectMapped(page->bucket)); | 1003 DCHECK(partitionBucketIsDirectMapped(page->bucket)); |
1031 | 1004 |
1032 rawSize = partitionCookieSizeAdjustAdd(rawSize); | 1005 rawSize = partitionCookieSizeAdjustAdd(rawSize); |
1033 | 1006 |
1034 // Note that the new size might be a bucketed size; this function is called | 1007 // Note that the new size might be a bucketed size; this function is called |
1035 // whenever we're reallocating a direct mapped allocation. | 1008 // whenever we're reallocating a direct mapped allocation. |
1036 size_t newSize = partitionDirectMapSize(rawSize); | 1009 size_t newSize = partitionDirectMapSize(rawSize); |
1037 if (newSize < kGenericMinDirectMappedDownsize) | 1010 if (newSize < kGenericMinDirectMappedDownsize) |
1038 return false; | 1011 return false; |
1039 | 1012 |
1040 // bucket->slotSize is the current size of the allocation. | 1013 // bucket->slotSize is the current size of the allocation. |
(...skipping 13 matching lines...) Expand all Loading... |
1054 | 1027 |
1055 // Shrink by decommitting unneeded pages and making them inaccessible. | 1028 // Shrink by decommitting unneeded pages and making them inaccessible. |
1056 size_t decommitSize = currentSize - newSize; | 1029 size_t decommitSize = currentSize - newSize; |
1057 partitionDecommitSystemPages(root, charPtr + newSize, decommitSize); | 1030 partitionDecommitSystemPages(root, charPtr + newSize, decommitSize); |
1058 setSystemPagesInaccessible(charPtr + newSize, decommitSize); | 1031 setSystemPagesInaccessible(charPtr + newSize, decommitSize); |
1059 } else if (newSize <= partitionPageToDirectMapExtent(page)->mapSize) { | 1032 } else if (newSize <= partitionPageToDirectMapExtent(page)->mapSize) { |
1060 // Grow within the actually allocated memory. Just need to make the | 1033 // Grow within the actually allocated memory. Just need to make the |
1061 // pages accessible again. | 1034 // pages accessible again. |
1062 size_t recommitSize = newSize - currentSize; | 1035 size_t recommitSize = newSize - currentSize; |
1063 bool ret = setSystemPagesAccessible(charPtr + currentSize, recommitSize); | 1036 bool ret = setSystemPagesAccessible(charPtr + currentSize, recommitSize); |
1064 RELEASE_ASSERT(ret); | 1037 CHECK(ret); |
1065 partitionRecommitSystemPages(root, charPtr + currentSize, recommitSize); | 1038 partitionRecommitSystemPages(root, charPtr + currentSize, recommitSize); |
1066 | 1039 |
1067 #if ENABLE(ASSERT) | 1040 #if DCHECK_IS_ON() |
1068 memset(charPtr + currentSize, kUninitializedByte, recommitSize); | 1041 memset(charPtr + currentSize, kUninitializedByte, recommitSize); |
1069 #endif | 1042 #endif |
1070 } else { | 1043 } else { |
1071 // We can't perform the realloc in-place. | 1044 // We can't perform the realloc in-place. |
1072 // TODO: support this too when possible. | 1045 // TODO: support this too when possible. |
1073 return false; | 1046 return false; |
1074 } | 1047 } |
1075 | 1048 |
1076 #if ENABLE(ASSERT) | 1049 #if DCHECK_IS_ON() |
1077 // Write a new trailing cookie. | 1050 // Write a new trailing cookie. |
1078 partitionCookieWriteValue(charPtr + rawSize - kCookieSize); | 1051 partitionCookieWriteValue(charPtr + rawSize - kCookieSize); |
1079 #endif | 1052 #endif |
1080 | 1053 |
1081 partitionPageSetRawSize(page, rawSize); | 1054 partitionPageSetRawSize(page, rawSize); |
1082 ASSERT(partitionPageGetRawSize(page) == rawSize); | 1055 DCHECK(partitionPageGetRawSize(page) == rawSize); |
1083 | 1056 |
1084 page->bucket->slotSize = newSize; | 1057 page->bucket->slotSize = newSize; |
1085 return true; | 1058 return true; |
1086 } | 1059 } |
1087 | 1060 |
1088 void* partitionReallocGeneric(PartitionRootGeneric* root, | 1061 void* partitionReallocGeneric(PartitionRootGeneric* root, |
1089 void* ptr, | 1062 void* ptr, |
1090 size_t newSize, | 1063 size_t newSize, |
1091 const char* typeName) { | 1064 const char* typeName) { |
1092 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) | 1065 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) |
1093 return realloc(ptr, newSize); | 1066 return realloc(ptr, newSize); |
1094 #else | 1067 #else |
1095 if (UNLIKELY(!ptr)) | 1068 if (UNLIKELY(!ptr)) |
1096 return partitionAllocGeneric(root, newSize, typeName); | 1069 return partitionAllocGeneric(root, newSize, typeName); |
1097 if (UNLIKELY(!newSize)) { | 1070 if (UNLIKELY(!newSize)) { |
1098 partitionFreeGeneric(root, ptr); | 1071 partitionFreeGeneric(root, ptr); |
1099 return 0; | 1072 return 0; |
1100 } | 1073 } |
1101 | 1074 |
1102 if (newSize > kGenericMaxDirectMapped) | 1075 if (newSize > kGenericMaxDirectMapped) |
1103 partitionExcessiveAllocationSize(); | 1076 partitionExcessiveAllocationSize(); |
1104 | 1077 |
1105 ASSERT(partitionPointerIsValid(partitionCookieFreePointerAdjust(ptr))); | 1078 DCHECK(partitionPointerIsValid(partitionCookieFreePointerAdjust(ptr))); |
1106 | 1079 |
1107 PartitionPage* page = | 1080 PartitionPage* page = |
1108 partitionPointerToPage(partitionCookieFreePointerAdjust(ptr)); | 1081 partitionPointerToPage(partitionCookieFreePointerAdjust(ptr)); |
1109 | 1082 |
1110 if (UNLIKELY(partitionBucketIsDirectMapped(page->bucket))) { | 1083 if (UNLIKELY(partitionBucketIsDirectMapped(page->bucket))) { |
1111 // We may be able to perform the realloc in place by changing the | 1084 // We may be able to perform the realloc in place by changing the |
1112 // accessibility of memory pages and, if reducing the size, decommitting | 1085 // accessibility of memory pages and, if reducing the size, decommitting |
1113 // them. | 1086 // them. |
1114 if (partitionReallocDirectMappedInPlace(root, page, newSize)) { | 1087 if (partitionReallocDirectMappedInPlace(root, page, newSize)) { |
1115 PartitionAllocHooks::reallocHookIfEnabled(ptr, ptr, newSize, typeName); | 1088 PartitionAllocHooks::reallocHookIfEnabled(ptr, ptr, newSize, typeName); |
(...skipping 30 matching lines...) Expand all Loading... |
1146 const PartitionBucket* bucket = page->bucket; | 1119 const PartitionBucket* bucket = page->bucket; |
1147 size_t slotSize = bucket->slotSize; | 1120 size_t slotSize = bucket->slotSize; |
1148 if (slotSize < kSystemPageSize || !page->numAllocatedSlots) | 1121 if (slotSize < kSystemPageSize || !page->numAllocatedSlots) |
1149 return 0; | 1122 return 0; |
1150 | 1123 |
1151 size_t bucketNumSlots = partitionBucketSlots(bucket); | 1124 size_t bucketNumSlots = partitionBucketSlots(bucket); |
1152 size_t discardableBytes = 0; | 1125 size_t discardableBytes = 0; |
1153 | 1126 |
1154 size_t rawSize = partitionPageGetRawSize(const_cast<PartitionPage*>(page)); | 1127 size_t rawSize = partitionPageGetRawSize(const_cast<PartitionPage*>(page)); |
1155 if (rawSize) { | 1128 if (rawSize) { |
1156 uint32_t usedBytes = | 1129 uint32_t usedBytes = static_cast<uint32_t>(roundUpToSystemPage(rawSize)); |
1157 static_cast<uint32_t>(WTF::roundUpToSystemPage(rawSize)); | |
1158 discardableBytes = bucket->slotSize - usedBytes; | 1130 discardableBytes = bucket->slotSize - usedBytes; |
1159 if (discardableBytes && discard) { | 1131 if (discardableBytes && discard) { |
1160 char* ptr = reinterpret_cast<char*>(partitionPageToPointer(page)); | 1132 char* ptr = reinterpret_cast<char*>(partitionPageToPointer(page)); |
1161 ptr += usedBytes; | 1133 ptr += usedBytes; |
1162 discardSystemPages(ptr, discardableBytes); | 1134 discardSystemPages(ptr, discardableBytes); |
1163 } | 1135 } |
1164 return discardableBytes; | 1136 return discardableBytes; |
1165 } | 1137 } |
1166 | 1138 |
1167 const size_t maxSlotCount = | 1139 const size_t maxSlotCount = |
1168 (kPartitionPageSize * kMaxPartitionPagesPerSlotSpan) / kSystemPageSize; | 1140 (kPartitionPageSize * kMaxPartitionPagesPerSlotSpan) / kSystemPageSize; |
1169 ASSERT(bucketNumSlots <= maxSlotCount); | 1141 DCHECK(bucketNumSlots <= maxSlotCount); |
1170 ASSERT(page->numUnprovisionedSlots < bucketNumSlots); | 1142 DCHECK(page->numUnprovisionedSlots < bucketNumSlots); |
1171 size_t numSlots = bucketNumSlots - page->numUnprovisionedSlots; | 1143 size_t numSlots = bucketNumSlots - page->numUnprovisionedSlots; |
1172 char slotUsage[maxSlotCount]; | 1144 char slotUsage[maxSlotCount]; |
1173 size_t lastSlot = static_cast<size_t>(-1); | 1145 size_t lastSlot = static_cast<size_t>(-1); |
1174 memset(slotUsage, 1, numSlots); | 1146 memset(slotUsage, 1, numSlots); |
1175 char* ptr = reinterpret_cast<char*>(partitionPageToPointer(page)); | 1147 char* ptr = reinterpret_cast<char*>(partitionPageToPointer(page)); |
1176 PartitionFreelistEntry* entry = page->freelistHead; | 1148 PartitionFreelistEntry* entry = page->freelistHead; |
1177 // First, walk the freelist for this page and make a bitmap of which slots | 1149 // First, walk the freelist for this page and make a bitmap of which slots |
1178 // are not in use. | 1150 // are not in use. |
1179 while (entry) { | 1151 while (entry) { |
1180 size_t slotIndex = (reinterpret_cast<char*>(entry) - ptr) / slotSize; | 1152 size_t slotIndex = (reinterpret_cast<char*>(entry) - ptr) / slotSize; |
1181 ASSERT(slotIndex < numSlots); | 1153 DCHECK(slotIndex < numSlots); |
1182 slotUsage[slotIndex] = 0; | 1154 slotUsage[slotIndex] = 0; |
1183 entry = partitionFreelistMask(entry->next); | 1155 entry = partitionFreelistMask(entry->next); |
1184 // If we have a slot where the masked freelist entry is 0, we can | 1156 // If we have a slot where the masked freelist entry is 0, we can |
1185 // actually discard that freelist entry because touching a discarded | 1157 // actually discard that freelist entry because touching a discarded |
1186 // page is guaranteed to return original content or 0. | 1158 // page is guaranteed to return original content or 0. |
1187 // (Note that this optimization won't fire on big endian machines | 1159 // (Note that this optimization won't fire on big endian machines |
1188 // because the masking function is negation.) | 1160 // because the masking function is negation.) |
1189 if (!partitionFreelistMask(entry)) | 1161 if (!partitionFreelistMask(entry)) |
1190 lastSlot = slotIndex; | 1162 lastSlot = slotIndex; |
1191 } | 1163 } |
1192 | 1164 |
1193 // If the slot(s) at the end of the slot span are not in use, we can | 1165 // If the slot(s) at the end of the slot span are not in use, we can |
1194 // truncate them entirely and rewrite the freelist. | 1166 // truncate them entirely and rewrite the freelist. |
1195 size_t truncatedSlots = 0; | 1167 size_t truncatedSlots = 0; |
1196 while (!slotUsage[numSlots - 1]) { | 1168 while (!slotUsage[numSlots - 1]) { |
1197 truncatedSlots++; | 1169 truncatedSlots++; |
1198 numSlots--; | 1170 numSlots--; |
1199 ASSERT(numSlots); | 1171 DCHECK(numSlots); |
1200 } | 1172 } |
1201 // First, do the work of calculating the discardable bytes. Don't actually | 1173 // First, do the work of calculating the discardable bytes. Don't actually |
1202 // discard anything unless the discard flag was passed in. | 1174 // discard anything unless the discard flag was passed in. |
1203 char* beginPtr = nullptr; | 1175 char* beginPtr = nullptr; |
1204 char* endPtr = nullptr; | 1176 char* endPtr = nullptr; |
1205 size_t unprovisionedBytes = 0; | 1177 size_t unprovisionedBytes = 0; |
1206 if (truncatedSlots) { | 1178 if (truncatedSlots) { |
1207 beginPtr = ptr + (numSlots * slotSize); | 1179 beginPtr = ptr + (numSlots * slotSize); |
1208 endPtr = beginPtr + (slotSize * truncatedSlots); | 1180 endPtr = beginPtr + (slotSize * truncatedSlots); |
1209 beginPtr = reinterpret_cast<char*>( | 1181 beginPtr = reinterpret_cast<char*>( |
1210 WTF::roundUpToSystemPage(reinterpret_cast<size_t>(beginPtr))); | 1182 roundUpToSystemPage(reinterpret_cast<size_t>(beginPtr))); |
1211 // We round the end pointer here up and not down because we're at the | 1183 // We round the end pointer here up and not down because we're at the |
1212 // end of a slot span, so we "own" all the way up the page boundary. | 1184 // end of a slot span, so we "own" all the way up the page boundary. |
1213 endPtr = reinterpret_cast<char*>( | 1185 endPtr = reinterpret_cast<char*>( |
1214 WTF::roundUpToSystemPage(reinterpret_cast<size_t>(endPtr))); | 1186 roundUpToSystemPage(reinterpret_cast<size_t>(endPtr))); |
1215 ASSERT(endPtr <= ptr + partitionBucketBytes(bucket)); | 1187 DCHECK(endPtr <= ptr + partitionBucketBytes(bucket)); |
1216 if (beginPtr < endPtr) { | 1188 if (beginPtr < endPtr) { |
1217 unprovisionedBytes = endPtr - beginPtr; | 1189 unprovisionedBytes = endPtr - beginPtr; |
1218 discardableBytes += unprovisionedBytes; | 1190 discardableBytes += unprovisionedBytes; |
1219 } | 1191 } |
1220 } | 1192 } |
1221 if (unprovisionedBytes && discard) { | 1193 if (unprovisionedBytes && discard) { |
1222 ASSERT(truncatedSlots > 0); | 1194 DCHECK(truncatedSlots > 0); |
1223 size_t numNewEntries = 0; | 1195 size_t numNewEntries = 0; |
1224 page->numUnprovisionedSlots += static_cast<uint16_t>(truncatedSlots); | 1196 page->numUnprovisionedSlots += static_cast<uint16_t>(truncatedSlots); |
1225 // Rewrite the freelist. | 1197 // Rewrite the freelist. |
1226 PartitionFreelistEntry** entryPtr = &page->freelistHead; | 1198 PartitionFreelistEntry** entryPtr = &page->freelistHead; |
1227 for (size_t slotIndex = 0; slotIndex < numSlots; ++slotIndex) { | 1199 for (size_t slotIndex = 0; slotIndex < numSlots; ++slotIndex) { |
1228 if (slotUsage[slotIndex]) | 1200 if (slotUsage[slotIndex]) |
1229 continue; | 1201 continue; |
1230 PartitionFreelistEntry* entry = reinterpret_cast<PartitionFreelistEntry*>( | 1202 PartitionFreelistEntry* entry = reinterpret_cast<PartitionFreelistEntry*>( |
1231 ptr + (slotSize * slotIndex)); | 1203 ptr + (slotSize * slotIndex)); |
1232 *entryPtr = partitionFreelistMask(entry); | 1204 *entryPtr = partitionFreelistMask(entry); |
1233 entryPtr = reinterpret_cast<PartitionFreelistEntry**>(entry); | 1205 entryPtr = reinterpret_cast<PartitionFreelistEntry**>(entry); |
1234 numNewEntries++; | 1206 numNewEntries++; |
1235 } | 1207 } |
1236 // Terminate the freelist chain. | 1208 // Terminate the freelist chain. |
1237 *entryPtr = nullptr; | 1209 *entryPtr = nullptr; |
1238 // The freelist head is stored unmasked. | 1210 // The freelist head is stored unmasked. |
1239 page->freelistHead = partitionFreelistMask(page->freelistHead); | 1211 page->freelistHead = partitionFreelistMask(page->freelistHead); |
1240 ASSERT(numNewEntries == numSlots - page->numAllocatedSlots); | 1212 DCHECK(numNewEntries == numSlots - page->numAllocatedSlots); |
1241 // Discard the memory. | 1213 // Discard the memory. |
1242 discardSystemPages(beginPtr, unprovisionedBytes); | 1214 discardSystemPages(beginPtr, unprovisionedBytes); |
1243 } | 1215 } |
1244 | 1216 |
1245 // Next, walk the slots and for any not in use, consider where the system | 1217 // Next, walk the slots and for any not in use, consider where the system |
1246 // page boundaries occur. We can release any system pages back to the | 1218 // page boundaries occur. We can release any system pages back to the |
1247 // system as long as we don't interfere with a freelist pointer or an | 1219 // system as long as we don't interfere with a freelist pointer or an |
1248 // adjacent slot. | 1220 // adjacent slot. |
1249 for (size_t i = 0; i < numSlots; ++i) { | 1221 for (size_t i = 0; i < numSlots; ++i) { |
1250 if (slotUsage[i]) | 1222 if (slotUsage[i]) |
1251 continue; | 1223 continue; |
1252 // The first address we can safely discard is just after the freelist | 1224 // The first address we can safely discard is just after the freelist |
1253 // pointer. There's one quirk: if the freelist pointer is actually | 1225 // pointer. There's one quirk: if the freelist pointer is actually |
1254 // null, we can discard that pointer value too. | 1226 // null, we can discard that pointer value too. |
1255 char* beginPtr = ptr + (i * slotSize); | 1227 char* beginPtr = ptr + (i * slotSize); |
1256 char* endPtr = beginPtr + slotSize; | 1228 char* endPtr = beginPtr + slotSize; |
1257 if (i != lastSlot) | 1229 if (i != lastSlot) |
1258 beginPtr += sizeof(PartitionFreelistEntry); | 1230 beginPtr += sizeof(PartitionFreelistEntry); |
1259 beginPtr = reinterpret_cast<char*>( | 1231 beginPtr = reinterpret_cast<char*>( |
1260 WTF::roundUpToSystemPage(reinterpret_cast<size_t>(beginPtr))); | 1232 roundUpToSystemPage(reinterpret_cast<size_t>(beginPtr))); |
1261 endPtr = reinterpret_cast<char*>( | 1233 endPtr = reinterpret_cast<char*>( |
1262 WTF::roundDownToSystemPage(reinterpret_cast<size_t>(endPtr))); | 1234 roundDownToSystemPage(reinterpret_cast<size_t>(endPtr))); |
1263 if (beginPtr < endPtr) { | 1235 if (beginPtr < endPtr) { |
1264 size_t partialSlotBytes = endPtr - beginPtr; | 1236 size_t partialSlotBytes = endPtr - beginPtr; |
1265 discardableBytes += partialSlotBytes; | 1237 discardableBytes += partialSlotBytes; |
1266 if (discard) | 1238 if (discard) |
1267 discardSystemPages(beginPtr, partialSlotBytes); | 1239 discardSystemPages(beginPtr, partialSlotBytes); |
1268 } | 1240 } |
1269 } | 1241 } |
1270 return discardableBytes; | 1242 return discardableBytes; |
1271 } | 1243 } |
1272 | 1244 |
1273 static void partitionPurgeBucket(PartitionBucket* bucket) { | 1245 static void partitionPurgeBucket(PartitionBucket* bucket) { |
1274 if (bucket->activePagesHead != &PartitionRootGeneric::gSeedPage) { | 1246 if (bucket->activePagesHead != &PartitionRootGeneric::gSeedPage) { |
1275 for (PartitionPage* page = bucket->activePagesHead; page; | 1247 for (PartitionPage* page = bucket->activePagesHead; page; |
1276 page = page->nextPage) { | 1248 page = page->nextPage) { |
1277 ASSERT(page != &PartitionRootGeneric::gSeedPage); | 1249 DCHECK(page != &PartitionRootGeneric::gSeedPage); |
1278 (void)partitionPurgePage(page, true); | 1250 (void)partitionPurgePage(page, true); |
1279 } | 1251 } |
1280 } | 1252 } |
1281 } | 1253 } |
1282 | 1254 |
1283 void partitionPurgeMemory(PartitionRoot* root, int flags) { | 1255 void partitionPurgeMemory(PartitionRoot* root, int flags) { |
1284 if (flags & PartitionPurgeDecommitEmptyPages) | 1256 if (flags & PartitionPurgeDecommitEmptyPages) |
1285 partitionDecommitEmptyPages(root); | 1257 partitionDecommitEmptyPages(root); |
1286 // We don't currently do anything for PartitionPurgeDiscardUnusedSystemPages | 1258 // We don't currently do anything for PartitionPurgeDiscardUnusedSystemPages |
1287 // here because that flag is only useful for allocations >= system page | 1259 // here because that flag is only useful for allocations >= system page |
1288 // size. We only have allocations that large inside generic partitions | 1260 // size. We only have allocations that large inside generic partitions |
1289 // at the moment. | 1261 // at the moment. |
1290 } | 1262 } |
1291 | 1263 |
1292 void partitionPurgeMemoryGeneric(PartitionRootGeneric* root, int flags) { | 1264 void partitionPurgeMemoryGeneric(PartitionRootGeneric* root, int flags) { |
1293 SpinLock::Guard guard(root->lock); | 1265 subtle::SpinLock::Guard guard(root->lock); |
1294 if (flags & PartitionPurgeDecommitEmptyPages) | 1266 if (flags & PartitionPurgeDecommitEmptyPages) |
1295 partitionDecommitEmptyPages(root); | 1267 partitionDecommitEmptyPages(root); |
1296 if (flags & PartitionPurgeDiscardUnusedSystemPages) { | 1268 if (flags & PartitionPurgeDiscardUnusedSystemPages) { |
1297 for (size_t i = 0; i < kGenericNumBuckets; ++i) { | 1269 for (size_t i = 0; i < kGenericNumBuckets; ++i) { |
1298 PartitionBucket* bucket = &root->buckets[i]; | 1270 PartitionBucket* bucket = &root->buckets[i]; |
1299 if (bucket->slotSize >= kSystemPageSize) | 1271 if (bucket->slotSize >= kSystemPageSize) |
1300 partitionPurgeBucket(bucket); | 1272 partitionPurgeBucket(bucket); |
1301 } | 1273 } |
1302 } | 1274 } |
1303 } | 1275 } |
(...skipping 11 matching lines...) Expand all Loading... |
1315 partitionPurgePage(const_cast<PartitionPage*>(page), false); | 1287 partitionPurgePage(const_cast<PartitionPage*>(page), false); |
1316 | 1288 |
1317 size_t rawSize = partitionPageGetRawSize(const_cast<PartitionPage*>(page)); | 1289 size_t rawSize = partitionPageGetRawSize(const_cast<PartitionPage*>(page)); |
1318 if (rawSize) | 1290 if (rawSize) |
1319 statsOut->activeBytes += static_cast<uint32_t>(rawSize); | 1291 statsOut->activeBytes += static_cast<uint32_t>(rawSize); |
1320 else | 1292 else |
1321 statsOut->activeBytes += | 1293 statsOut->activeBytes += |
1322 (page->numAllocatedSlots * statsOut->bucketSlotSize); | 1294 (page->numAllocatedSlots * statsOut->bucketSlotSize); |
1323 | 1295 |
1324 size_t pageBytesResident = | 1296 size_t pageBytesResident = |
1325 WTF::roundUpToSystemPage((bucketNumSlots - page->numUnprovisionedSlots) * | 1297 roundUpToSystemPage((bucketNumSlots - page->numUnprovisionedSlots) * |
1326 statsOut->bucketSlotSize); | 1298 statsOut->bucketSlotSize); |
1327 statsOut->residentBytes += pageBytesResident; | 1299 statsOut->residentBytes += pageBytesResident; |
1328 if (partitionPageStateIsEmpty(page)) { | 1300 if (partitionPageStateIsEmpty(page)) { |
1329 statsOut->decommittableBytes += pageBytesResident; | 1301 statsOut->decommittableBytes += pageBytesResident; |
1330 ++statsOut->numEmptyPages; | 1302 ++statsOut->numEmptyPages; |
1331 } else if (partitionPageStateIsFull(page)) { | 1303 } else if (partitionPageStateIsFull(page)) { |
1332 ++statsOut->numFullPages; | 1304 ++statsOut->numFullPages; |
1333 } else { | 1305 } else { |
1334 ASSERT(partitionPageStateIsActive(page)); | 1306 DCHECK(partitionPageStateIsActive(page)); |
1335 ++statsOut->numActivePages; | 1307 ++statsOut->numActivePages; |
1336 } | 1308 } |
1337 } | 1309 } |
1338 | 1310 |
// Fills |statsOut| with memory statistics for a single normal (non
// direct-mapped) bucket. NOTE(review): this is a side-by-side diff rendering
// (OLD | NEW columns); the comments below describe the NEW-side code.
1339 static void partitionDumpBucketStats(PartitionBucketMemoryStats* statsOut, | 1311 static void partitionDumpBucketStats(PartitionBucketMemoryStats* statsOut, |
1340 const PartitionBucket* bucket) { | 1312 const PartitionBucket* bucket) { |
// Direct-mapped allocations are reported elsewhere; this path must only see
// regular buckets.
1341 ASSERT(!partitionBucketIsDirectMapped(bucket)); | 1313 DCHECK(!partitionBucketIsDirectMapped(bucket)); |
// Default to "nothing to report" so an early return leaves |statsOut| marked
// invalid for the caller.
1342 statsOut->isValid = false; | 1314 statsOut->isValid = false; |
1343 // If the active page list is empty (== &PartitionRootGeneric::gSeedPage), | 1315 // If the active page list is empty (== &PartitionRootGeneric::gSeedPage), |
1344 // the bucket might still need to be reported if it has a list of empty, | 1316 // the bucket might still need to be reported if it has a list of empty, |
1345 // decommitted or full pages. | 1317 // decommitted or full pages. |
1346 if (bucket->activePagesHead == &PartitionRootGeneric::gSeedPage && | 1318 if (bucket->activePagesHead == &PartitionRootGeneric::gSeedPage && |
1347 !bucket->emptyPagesHead && !bucket->decommittedPagesHead && | 1319 !bucket->emptyPagesHead && !bucket->decommittedPagesHead && |
1348 !bucket->numFullPages) | 1320 !bucket->numFullPages) |
1349 return; | 1321 return; |
1350 | 1322 |
// Zero the whole struct before accumulating, then mark it valid. Full pages
// are not kept on any list, so their contribution is computed from the
// counter alone; per-page stats below add the empty/decommitted/active pages.
1351 memset(statsOut, '\0', sizeof(*statsOut)); | 1323 memset(statsOut, '\0', sizeof(*statsOut)); |
1352 statsOut->isValid = true; | 1324 statsOut->isValid = true; |
1353 statsOut->isDirectMap = false; | 1325 statsOut->isDirectMap = false; |
1354 statsOut->numFullPages = static_cast<size_t>(bucket->numFullPages); | 1326 statsOut->numFullPages = static_cast<size_t>(bucket->numFullPages); |
1355 statsOut->bucketSlotSize = bucket->slotSize; | 1327 statsOut->bucketSlotSize = bucket->slotSize; |
1356 uint16_t bucketNumSlots = partitionBucketSlots(bucket); | 1328 uint16_t bucketNumSlots = partitionBucketSlots(bucket); |
// Useful storage per page = slot size * slots; a full page contributes all
// of it to activeBytes and its whole allocated size to residentBytes.
1357 size_t bucketUsefulStorage = statsOut->bucketSlotSize * bucketNumSlots; | 1329 size_t bucketUsefulStorage = statsOut->bucketSlotSize * bucketNumSlots; |
1358 statsOut->allocatedPageSize = partitionBucketBytes(bucket); | 1330 statsOut->allocatedPageSize = partitionBucketBytes(bucket); |
1359 statsOut->activeBytes = bucket->numFullPages * bucketUsefulStorage; | 1331 statsOut->activeBytes = bucket->numFullPages * bucketUsefulStorage; |
1360 statsOut->residentBytes = bucket->numFullPages * statsOut->allocatedPageSize; | 1332 statsOut->residentBytes = bucket->numFullPages * statsOut->allocatedPageSize; |
1361 | 1333 |
// The empty list may also hold pages that have since been decommitted, hence
// the two-state assertion.
1362 for (const PartitionPage* page = bucket->emptyPagesHead; page; | 1334 for (const PartitionPage* page = bucket->emptyPagesHead; page; |
1363 page = page->nextPage) { | 1335 page = page->nextPage) { |
1364 ASSERT(partitionPageStateIsEmpty(page) || | 1336 DCHECK(partitionPageStateIsEmpty(page) || |
1365 partitionPageStateIsDecommitted(page)); | 1337 partitionPageStateIsDecommitted(page)); |
1366 partitionDumpPageStats(statsOut, page); | 1338 partitionDumpPageStats(statsOut, page); |
1367 } | 1339 } |
1368 for (const PartitionPage* page = bucket->decommittedPagesHead; page; | 1340 for (const PartitionPage* page = bucket->decommittedPagesHead; page; |
1369 page = page->nextPage) { | 1341 page = page->nextPage) { |
1370 ASSERT(partitionPageStateIsDecommitted(page)); | 1342 DCHECK(partitionPageStateIsDecommitted(page)); |
1371 partitionDumpPageStats(statsOut, page); | 1343 partitionDumpPageStats(statsOut, page); |
1372 } | 1344 } |
1373 | 1345 |
// Walk the active list only when it is a real list (not the shared seed
// sentinel); the sentinel itself must never be dumped.
1374 if (bucket->activePagesHead != &PartitionRootGeneric::gSeedPage) { | 1346 if (bucket->activePagesHead != &PartitionRootGeneric::gSeedPage) { |
1375 for (const PartitionPage* page = bucket->activePagesHead; page; | 1347 for (const PartitionPage* page = bucket->activePagesHead; page; |
1376 page = page->nextPage) { | 1348 page = page->nextPage) { |
1377 ASSERT(page != &PartitionRootGeneric::gSeedPage); | 1349 DCHECK(page != &PartitionRootGeneric::gSeedPage); |
1378 partitionDumpPageStats(statsOut, page); | 1350 partitionDumpPageStats(statsOut, page); |
1379 } | 1351 } |
1380 } | 1352 } |
1381 } | 1353 } |
1382 | 1354 |
1383 void partitionDumpStatsGeneric(PartitionRootGeneric* partition, | 1355 void partitionDumpStatsGeneric(PartitionRootGeneric* partition, |
1384 const char* partitionName, | 1356 const char* partitionName, |
1385 bool isLightDump, | 1357 bool isLightDump, |
1386 PartitionStatsDumper* partitionStatsDumper) { | 1358 PartitionStatsDumper* partitionStatsDumper) { |
1387 PartitionBucketMemoryStats bucketStats[kGenericNumBuckets]; | 1359 PartitionBucketMemoryStats bucketStats[kGenericNumBuckets]; |
1388 static const size_t kMaxReportableDirectMaps = 4096; | 1360 static const size_t kMaxReportableDirectMaps = 4096; |
1389 uint32_t directMapLengths[kMaxReportableDirectMaps]; | 1361 uint32_t directMapLengths[kMaxReportableDirectMaps]; |
1390 size_t numDirectMappedAllocations = 0; | 1362 size_t numDirectMappedAllocations = 0; |
1391 | 1363 |
1392 { | 1364 { |
1393 SpinLock::Guard guard(partition->lock); | 1365 subtle::SpinLock::Guard guard(partition->lock); |
1394 | 1366 |
1395 for (size_t i = 0; i < kGenericNumBuckets; ++i) { | 1367 for (size_t i = 0; i < kGenericNumBuckets; ++i) { |
1396 const PartitionBucket* bucket = &partition->buckets[i]; | 1368 const PartitionBucket* bucket = &partition->buckets[i]; |
1397 // Don't report the pseudo buckets that the generic allocator sets up in | 1369 // Don't report the pseudo buckets that the generic allocator sets up in |
1398 // order to preserve a fast size->bucket map (see | 1370 // order to preserve a fast size->bucket map (see |
1399 // partitionAllocGenericInit for details). | 1371 // partitionAllocGenericInit for details). |
1400 if (!bucket->activePagesHead) | 1372 if (!bucket->activePagesHead) |
1401 bucketStats[i].isValid = false; | 1373 bucketStats[i].isValid = false; |
1402 else | 1374 else |
1403 partitionDumpBucketStats(&bucketStats[i], bucket); | 1375 partitionDumpBucketStats(&bucketStats[i], bucket); |
1404 } | 1376 } |
1405 | 1377 |
1406 for (PartitionDirectMapExtent* extent = partition->directMapList; extent; | 1378 for (PartitionDirectMapExtent* extent = partition->directMapList; extent; |
1407 extent = extent->nextExtent) { | 1379 extent = extent->nextExtent) { |
1408 ASSERT(!extent->nextExtent || extent->nextExtent->prevExtent == extent); | 1380 DCHECK(!extent->nextExtent || extent->nextExtent->prevExtent == extent); |
1409 directMapLengths[numDirectMappedAllocations] = extent->bucket->slotSize; | 1381 directMapLengths[numDirectMappedAllocations] = extent->bucket->slotSize; |
1410 ++numDirectMappedAllocations; | 1382 ++numDirectMappedAllocations; |
1411 if (numDirectMappedAllocations == kMaxReportableDirectMaps) | 1383 if (numDirectMappedAllocations == kMaxReportableDirectMaps) |
1412 break; | 1384 break; |
1413 } | 1385 } |
1414 } | 1386 } |
1415 | 1387 |
1416 // partitionsDumpBucketStats is called after collecting stats because it | 1388 // partitionsDumpBucketStats is called after collecting stats because it |
1417 // can try to allocate using PartitionAllocGeneric and it can't obtain the | 1389 // can try to allocate using PartitionAllocGeneric and it can't obtain the |
1418 // lock. | 1390 // lock. |
(...skipping 37 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1456 partitionStatsDumper->partitionDumpTotals(partitionName, &partitionStats); | 1428 partitionStatsDumper->partitionDumpTotals(partitionName, &partitionStats); |
1457 } | 1429 } |
1458 | 1430 |
// Dumps per-bucket and aggregate memory statistics for a (non-generic)
// PartitionRoot to |partitionStatsDumper|. When |isLightDump| is true, only
// the totals are reported. NOTE(review): side-by-side diff rendering
// (OLD | NEW columns); comments describe the NEW-side code.
// NOTE(review): unlike partitionDumpStatsGeneric above, no SpinLock guard is
// taken here — presumably PartitionRoot dumps are externally synchronized or
// single-threaded; confirm against callers.
1459 void partitionDumpStats(PartitionRoot* partition, | 1431 void partitionDumpStats(PartitionRoot* partition, |
1460 const char* partitionName, | 1432 const char* partitionName, |
1461 bool isLightDump, | 1433 bool isLightDump, |
1462 PartitionStatsDumper* partitionStatsDumper) { | 1434 PartitionStatsDumper* partitionStatsDumper) { |
// Stats are collected into a fixed-size stack buffer (one 4KB page worth of
// pointers) before any dumper callback runs; the DCHECK guards the bound.
1463 static const size_t kMaxReportableBuckets = 4096 / sizeof(void*); | 1435 static const size_t kMaxReportableBuckets = 4096 / sizeof(void*); |
1464 PartitionBucketMemoryStats memoryStats[kMaxReportableBuckets]; | 1436 PartitionBucketMemoryStats memoryStats[kMaxReportableBuckets]; |
1465 const size_t partitionNumBuckets = partition->numBuckets; | 1437 const size_t partitionNumBuckets = partition->numBuckets; |
1466 ASSERT(partitionNumBuckets <= kMaxReportableBuckets); | 1438 DCHECK(partitionNumBuckets <= kMaxReportableBuckets); |
1467 | 1439 |
// First pass: snapshot every bucket's stats (marks isValid per bucket).
1468 for (size_t i = 0; i < partitionNumBuckets; ++i) | 1440 for (size_t i = 0; i < partitionNumBuckets; ++i) |
1469 partitionDumpBucketStats(&memoryStats[i], &partition->buckets()[i]); | 1441 partitionDumpBucketStats(&memoryStats[i], &partition->buckets()[i]); |
1470 | 1442 |
1471 // partitionsDumpBucketStats is called after collecting stats because it | 1443 // partitionsDumpBucketStats is called after collecting stats because it |
1472 // can use PartitionAlloc to allocate and this can affect the statistics. | 1444 // can use PartitionAlloc to allocate and this can affect the statistics. |
1473 PartitionMemoryStats partitionStats = {0}; | 1445 PartitionMemoryStats partitionStats = {0}; |
1474 partitionStats.totalMmappedBytes = partition->totalSizeOfSuperPages; | 1446 partitionStats.totalMmappedBytes = partition->totalSizeOfSuperPages; |
1475 partitionStats.totalCommittedBytes = partition->totalSizeOfCommittedPages; | 1447 partitionStats.totalCommittedBytes = partition->totalSizeOfCommittedPages; |
// Non-generic partitions never direct-map, so this counter must stay zero.
1476 ASSERT(!partition->totalSizeOfDirectMappedPages); | 1448 DCHECK(!partition->totalSizeOfDirectMappedPages); |
// Second pass: fold valid per-bucket stats into the totals and, unless this
// is a light dump, report each bucket individually.
1477 for (size_t i = 0; i < partitionNumBuckets; ++i) { | 1449 for (size_t i = 0; i < partitionNumBuckets; ++i) { |
1478 if (memoryStats[i].isValid) { | 1450 if (memoryStats[i].isValid) { |
1479 partitionStats.totalResidentBytes += memoryStats[i].residentBytes; | 1451 partitionStats.totalResidentBytes += memoryStats[i].residentBytes; |
1480 partitionStats.totalActiveBytes += memoryStats[i].activeBytes; | 1452 partitionStats.totalActiveBytes += memoryStats[i].activeBytes; |
1481 partitionStats.totalDecommittableBytes += | 1453 partitionStats.totalDecommittableBytes += |
1482 memoryStats[i].decommittableBytes; | 1454 memoryStats[i].decommittableBytes; |
1483 partitionStats.totalDiscardableBytes += memoryStats[i].discardableBytes; | 1455 partitionStats.totalDiscardableBytes += memoryStats[i].discardableBytes; |
1484 if (!isLightDump) | 1456 if (!isLightDump) |
1485 partitionStatsDumper->partitionsDumpBucketStats(partitionName, | 1457 partitionStatsDumper->partitionsDumpBucketStats(partitionName, |
1486 &memoryStats[i]); | 1458 &memoryStats[i]); |
1487 } | 1459 } |
1488 } | 1460 } |
// Totals are always reported, even for a light dump.
1489 partitionStatsDumper->partitionDumpTotals(partitionName, &partitionStats); | 1461 partitionStatsDumper->partitionDumpTotals(partitionName, &partitionStats); |
1490 } | 1462 } |
1491 | 1463 |
1492 } // namespace WTF | 1464 } // namespace base |
OLD | NEW |