Chromium Code Reviews

Side by Side Diff: third_party/WebKit/Source/wtf/PartitionAlloc.cpp

Issue 1881983003: Move PartitionAlloc related things into wtf/allocator. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 4 years, 8 months ago
1 /*
2 * Copyright (C) 2013 Google Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are
6 * met:
7 *
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above
11 * copyright notice, this list of conditions and the following disclaimer
12 * in the documentation and/or other materials provided with the
13 * distribution.
14 * * Neither the name of Google Inc. nor the names of its
15 * contributors may be used to endorse or promote products derived from
16 * this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31 #include "wtf/PartitionAlloc.h"
32
33 #include <string.h>
34
35 #ifndef NDEBUG
36 #include <stdio.h>
37 #endif
38
39 // Two partition pages are used as guard / metadata pages, so make sure the
40 // super page size is big enough.
41 static_assert(WTF::kPartitionPageSize * 4 <= WTF::kSuperPageSize, "ok super page size");
42 static_assert(!(WTF::kSuperPageSize % WTF::kPartitionPageSize), "ok super page multiple");
43 // Four system pages gives us room to hack out a still-guard-paged piece
44 // of metadata in the middle of a guard partition page.
45 static_assert(WTF::kSystemPageSize * 4 <= WTF::kPartitionPageSize, "ok partition page size");
46 static_assert(!(WTF::kPartitionPageSize % WTF::kSystemPageSize), "ok partition page multiple");
47 static_assert(sizeof(WTF::PartitionPage) <= WTF::kPageMetadataSize, "PartitionPage should not be too big");
48 static_assert(sizeof(WTF::PartitionBucket) <= WTF::kPageMetadataSize, "PartitionBucket should not be too big");
49 static_assert(sizeof(WTF::PartitionSuperPageExtentEntry) <= WTF::kPageMetadataSize, "PartitionSuperPageExtentEntry should not be too big");
50 static_assert(WTF::kPageMetadataSize * WTF::kNumPartitionPagesPerSuperPage <= WTF::kSystemPageSize, "page metadata fits in hole");
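// Illustrative super-page layout implied by the asserts above and by the
// guard-page setup in partitionAllocPartitionPages() below:
//
//   | guard system page | metadata system page | inaccessible pages... |  <- first partition page
//   | usable partition pages ...                                       |
//   | guard partition page                                             |
//
// The lone accessible metadata system page is the "still-guard-paged piece of
// metadata" referred to above; the kPageMetadataSize assert guarantees that
// one PartitionPage entry per partition page fits inside it.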
51 // Check that some of our zanier calculations worked out as expected.
52 static_assert(WTF::kGenericSmallestBucket == 8, "generic smallest bucket");
53 static_assert(WTF::kGenericMaxBucketed == 983040, "generic max bucketed");
54 static_assert(WTF::kMaxSystemPagesPerSlotSpan < (1 << 8), "System pages per slot span must be less than 256.");
55
56 namespace WTF {
57
58 SpinLock PartitionRootBase::gInitializedLock;
59 bool PartitionRootBase::gInitialized = false;
60 PartitionPage PartitionRootBase::gSeedPage;
61 PartitionBucket PartitionRootBase::gPagedBucket;
62 void (*PartitionRootBase::gOomHandlingFunction)() = nullptr;
63 PartitionAllocHooks::AllocationHook* PartitionAllocHooks::m_allocationHook = nullptr;
64 PartitionAllocHooks::FreeHook* PartitionAllocHooks::m_freeHook = nullptr;
65
66 static uint8_t partitionBucketNumSystemPages(size_t size)
67 {
68 // This works out reasonably for the current bucket sizes of the generic
69 // allocator, and the current values of partition page size and constants.
70 // Specifically, we have enough room to always pack the slots perfectly into
71 // some number of system pages. The only waste is the waste associated with
72 // unfaulted pages (i.e. wasted address space).
73 // TODO: we end up using a lot of system pages for very small sizes. For
74 // example, we'll use 12 system pages for slot size 24. The slot size is
75 // so small that the waste would be tiny with just 4, or 1, system pages.
76 // Later, we can investigate whether there are anti-fragmentation benefits
77 // to using fewer system pages.
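// Illustrative walk-through (assuming 4 KiB system pages and 4 system pages
// per partition page): for slotSize == 24, three system pages pack 512 slots
// exactly but are charged sizeof(void*) for the one unfaulted remainder page
// of the partition page, whereas 12 system pages pack 2048 slots with zero
// waste, so the loop below settles on 12 pages -- the behavior the TODO above
// calls out.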
78 double bestWasteRatio = 1.0f;
79 uint16_t bestPages = 0;
80 if (size > kMaxSystemPagesPerSlotSpan * kSystemPageSize) {
81 ASSERT(!(size % kSystemPageSize));
82 bestPages = static_cast<uint16_t>(size / kSystemPageSize);
83 RELEASE_ASSERT(bestPages < (1 << 8));
84 return static_cast<uint8_t>(bestPages);
85 }
86 ASSERT(size <= kMaxSystemPagesPerSlotSpan * kSystemPageSize);
87 for (uint16_t i = kNumSystemPagesPerPartitionPage - 1; i <= kMaxSystemPagesPerSlotSpan; ++i) {
88 size_t pageSize = kSystemPageSize * i;
89 size_t numSlots = pageSize / size;
90 size_t waste = pageSize - (numSlots * size);
91 // Leaving a page unfaulted is not free; the page will occupy an empty page table entry.
92 // Make a simple attempt to account for that.
93 size_t numRemainderPages = i & (kNumSystemPagesPerPartitionPage - 1);
94 size_t numUnfaultedPages = numRemainderPages ? (kNumSystemPagesPerPartitionPage - numRemainderPages) : 0;
95 waste += sizeof(void*) * numUnfaultedPages;
96 double wasteRatio = (double) waste / (double) pageSize;
97 if (wasteRatio < bestWasteRatio) {
98 bestWasteRatio = wasteRatio;
99 bestPages = i;
100 }
101 }
102 ASSERT(bestPages > 0);
103 RELEASE_ASSERT(bestPages <= kMaxSystemPagesPerSlotSpan);
104 return static_cast<uint8_t>(bestPages);
105 }
106
107 static void partitionAllocBaseInit(PartitionRootBase* root)
108 {
109 ASSERT(!root->initialized);
110 {
111 SpinLock::Guard guard(PartitionRootBase::gInitializedLock);
112 if (!PartitionRootBase::gInitialized) {
113 PartitionRootBase::gInitialized = true;
114 // We mark the seed page as free to make sure it is skipped by our
115 // logic to find a new active page.
116 PartitionRootBase::gPagedBucket.activePagesHead = &PartitionRootGeneric::gSeedPage;
117 }
118 }
119
120 root->initialized = true;
121 root->totalSizeOfCommittedPages = 0;
122 root->totalSizeOfSuperPages = 0;
123 root->totalSizeOfDirectMappedPages = 0;
124 root->nextSuperPage = 0;
125 root->nextPartitionPage = 0;
126 root->nextPartitionPageEnd = 0;
127 root->firstExtent = 0;
128 root->currentExtent = 0;
129 root->directMapList = 0;
130
131 memset(&root->globalEmptyPageRing, '\0', sizeof(root->globalEmptyPageRing));
132 root->globalEmptyPageRingIndex = 0;
133
134 // This is a "magic" value so we can test if a root pointer is valid.
135 root->invertedSelf = ~reinterpret_cast<uintptr_t>(root);
136 }
137
138 static void partitionBucketInitBase(PartitionBucket* bucket, PartitionRootBase* root)
139 {
140 bucket->activePagesHead = &PartitionRootGeneric::gSeedPage;
141 bucket->emptyPagesHead = 0;
142 bucket->decommittedPagesHead = 0;
143 bucket->numFullPages = 0;
144 bucket->numSystemPagesPerSlotSpan = partitionBucketNumSystemPages(bucket->slotSize);
145 }
146
147 void partitionAllocGlobalInit(void (*oomHandlingFunction)())
148 {
149 ASSERT(oomHandlingFunction);
150 PartitionRootBase::gOomHandlingFunction = oomHandlingFunction;
151 }
152
153 void partitionAllocInit(PartitionRoot* root, size_t numBuckets, size_t maxAllocation)
154 {
155 partitionAllocBaseInit(root);
156
157 root->numBuckets = numBuckets;
158 root->maxAllocation = maxAllocation;
159 size_t i;
160 for (i = 0; i < root->numBuckets; ++i) {
161 PartitionBucket* bucket = &root->buckets()[i];
162 if (!i)
163 bucket->slotSize = kAllocationGranularity;
164 else
165 bucket->slotSize = i << kBucketShift;
166 partitionBucketInitBase(bucket, root);
167 }
168 }
169
170 void partitionAllocGenericInit(PartitionRootGeneric* root)
171 {
172 SpinLock::Guard guard(root->lock);
173
174 partitionAllocBaseInit(root);
175
176 // Precalculate some shift and mask constants used in the hot path.
177 // Example: malloc(41) == 101001 in binary.
178 // Order is 6: (1 << (6 - 1)) == 32 is the highest bit set.
179 // orderIndex is the next three MSBs == 010 == 2.
180 // subOrderIndexMask is a mask for the remaining bits == 11 (masking to 01 for the subOrderIndex).
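// (Conceptually, the size -> bucket table built further down is indexed as
// bucketLookups[order * kGenericNumBucketsPerOrder + orderIndex]; the shifts
// and masks precalculated here just extract order and orderIndex quickly on
// the hot path.)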
181 size_t order;
182 for (order = 0; order <= kBitsPerSizet; ++order) {
183 size_t orderIndexShift;
184 if (order < kGenericNumBucketsPerOrderBits + 1)
185 orderIndexShift = 0;
186 else
187 orderIndexShift = order - (kGenericNumBucketsPerOrderBits + 1);
188 root->orderIndexShifts[order] = orderIndexShift;
189 size_t subOrderIndexMask;
190 if (order == kBitsPerSizet) {
191 // This avoids invoking undefined behavior for an excessive shift.
192 subOrderIndexMask = static_cast<size_t>(-1) >> (kGenericNumBucketsPerOrderBits + 1);
193 } else {
194 subOrderIndexMask = ((static_cast<size_t>(1) << order) - 1) >> (kGenericNumBucketsPerOrderBits + 1);
195 }
196 root->orderSubIndexMasks[order] = subOrderIndexMask;
197 }
198
199 // Set up the actual usable buckets first.
200 // Note that typical values (i.e. min allocation size of 8) will result in
201 // pseudo buckets (size==9 etc. or more generally, size is not a multiple
202 // of the smallest allocation granularity).
203 // We avoid them in the bucket lookup map, but we tolerate them to keep the
204 // code simpler and the structures more generic.
205 size_t i, j;
206 size_t currentSize = kGenericSmallestBucket;
207 size_t currentIncrement = kGenericSmallestBucket >> kGenericNumBucketsPerOrderBits;
208 PartitionBucket* bucket = &root->buckets[0];
209 for (i = 0; i < kGenericNumBucketedOrders; ++i) {
210 for (j = 0; j < kGenericNumBucketsPerOrder; ++j) {
211 bucket->slotSize = currentSize;
212 partitionBucketInitBase(bucket, root);
213 // Disable pseudo buckets so that touching them faults.
214 if (currentSize % kGenericSmallestBucket)
215 bucket->activePagesHead = 0;
216 currentSize += currentIncrement;
217 ++bucket;
218 }
219 currentIncrement <<= 1;
220 }
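// (With the current constants -- smallest bucket 8, eight buckets per order,
// as implied by the asserts on kGenericSmallestBucket and kGenericMaxBucketed
// above -- the generated slot sizes run 8, 9, ..., 15, 16, 18, ..., 30, 32,
// 36, ... and so on. Sizes that are not a multiple of 8, e.g. 9 or 18, are
// the disabled pseudo buckets; the lookup table below rounds them up to the
// next valid bucket.)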
221 ASSERT(currentSize == 1 << kGenericMaxBucketedOrder);
222 ASSERT(bucket == &root->buckets[0] + kGenericNumBuckets);
223
224 // Then set up the fast size -> bucket lookup table.
225 bucket = &root->buckets[0];
226 PartitionBucket** bucketPtr = &root->bucketLookups[0];
227 for (order = 0; order <= kBitsPerSizet; ++order) {
228 for (j = 0; j < kGenericNumBucketsPerOrder; ++j) {
229 if (order < kGenericMinBucketedOrder) {
230 // Use the bucket of the finest granularity for malloc(0) etc.
231 *bucketPtr++ = &root->buckets[0];
232 } else if (order > kGenericMaxBucketedOrder) {
233 *bucketPtr++ = &PartitionRootGeneric::gPagedBucket;
234 } else {
235 PartitionBucket* validBucket = bucket;
236 // Skip over invalid buckets.
237 while (validBucket->slotSize % kGenericSmallestBucket)
238 validBucket++;
239 *bucketPtr++ = validBucket;
240 bucket++;
241 }
242 }
243 }
244 ASSERT(bucket == &root->buckets[0] + kGenericNumBuckets);
245 ASSERT(bucketPtr == &root->bucketLookups[0] + ((kBitsPerSizet + 1) * kGenericNumBucketsPerOrder));
246 // And there's one last bucket lookup that will be hit for e.g. malloc(-1),
247 // which tries to overflow to a non-existent order.
248 *bucketPtr = &PartitionRootGeneric::gPagedBucket;
249 }
250
251 static bool partitionAllocShutdownBucket(PartitionBucket* bucket)
252 {
253 // Failure here indicates a memory leak.
254 bool foundLeak = bucket->numFullPages;
255 for (PartitionPage* page = bucket->activePagesHead; page; page = page->nextPage)
256 foundLeak |= (page->numAllocatedSlots > 0);
257 return foundLeak;
258 }
259
260 static bool partitionAllocBaseShutdown(PartitionRootBase* root)
261 {
262 ASSERT(root->initialized);
263 root->initialized = false;
264
265 // Now that we've examined all partition pages in all buckets, it's safe
266 // to free all our super pages. Since the super page extent entries are
267 // stored in the super pages, we need to be careful not to access them
268 // after we've released the corresponding super page.
269 PartitionSuperPageExtentEntry* entry = root->firstExtent;
270 while (entry) {
271 PartitionSuperPageExtentEntry* nextEntry = entry->next;
272 char* superPage = entry->superPageBase;
273 char* superPagesEnd = entry->superPagesEnd;
274 while (superPage < superPagesEnd) {
275 freePages(superPage, kSuperPageSize);
276 superPage += kSuperPageSize;
277 }
278 entry = nextEntry;
279 }
280 return root->directMapList;
281 }
282
283 bool partitionAllocShutdown(PartitionRoot* root)
284 {
285 bool foundLeak = false;
286 size_t i;
287 for (i = 0; i < root->numBuckets; ++i) {
288 PartitionBucket* bucket = &root->buckets()[i];
289 foundLeak |= partitionAllocShutdownBucket(bucket);
290 }
291 foundLeak |= partitionAllocBaseShutdown(root);
292 return !foundLeak;
293 }
294
295 bool partitionAllocGenericShutdown(PartitionRootGeneric* root)
296 {
297 SpinLock::Guard guard(root->lock);
298 bool foundLeak = false;
299 size_t i;
300 for (i = 0; i < kGenericNumBuckets; ++i) {
301 PartitionBucket* bucket = &root->buckets[i];
302 foundLeak |= partitionAllocShutdownBucket(bucket);
303 }
304 foundLeak |= partitionAllocBaseShutdown(root);
305 return !foundLeak;
306 }
307
308 #if !CPU(64BIT)
309 static NEVER_INLINE void partitionOutOfMemoryWithLotsOfUncommitedPages()
310 {
311 IMMEDIATE_CRASH();
312 }
313 #endif
314
315 static NEVER_INLINE void partitionOutOfMemory(const PartitionRootBase* root)
316 {
317 #if !CPU(64BIT)
318 // Check whether this OOM is due to a lot of super pages that are allocated
319 // but not committed, probably due to http://crbug.com/421387.
320 if (root->totalSizeOfSuperPages + root->totalSizeOfDirectMappedPages - root->totalSizeOfCommittedPages > kReasonableSizeOfUnusedPages) {
321 partitionOutOfMemoryWithLotsOfUncommitedPages();
322 }
323 #endif
324 if (PartitionRootBase::gOomHandlingFunction)
325 (*PartitionRootBase::gOomHandlingFunction)();
326 IMMEDIATE_CRASH();
327 }
328
329 static NEVER_INLINE void partitionExcessiveAllocationSize()
330 {
331 IMMEDIATE_CRASH();
332 }
333
334 static NEVER_INLINE void partitionBucketFull()
335 {
336 IMMEDIATE_CRASH();
337 }
338
339 // partitionPageStateIs*
340 // Note that it's only valid to call these functions on pages found on one of
341 // the page lists. Specifically, you can't call these functions on full pages
342 // that were detached from the active list.
343 static bool ALWAYS_INLINE partitionPageStateIsActive(const PartitionPage* page)
344 {
345 ASSERT(page != &PartitionRootGeneric::gSeedPage);
346 ASSERT(!page->pageOffset);
347 return (page->numAllocatedSlots > 0 && (page->freelistHead || page->numUnprovisionedSlots));
348 }
349
350 static bool ALWAYS_INLINE partitionPageStateIsFull(const PartitionPage* page)
351 {
352 ASSERT(page != &PartitionRootGeneric::gSeedPage);
353 ASSERT(!page->pageOffset);
354 bool ret = (page->numAllocatedSlots == partitionBucketSlots(page->bucket));
355 if (ret) {
356 ASSERT(!page->freelistHead);
357 ASSERT(!page->numUnprovisionedSlots);
358 }
359 return ret;
360 }
361
362 static bool ALWAYS_INLINE partitionPageStateIsEmpty(const PartitionPage* page)
363 {
364 ASSERT(page != &PartitionRootGeneric::gSeedPage);
365 ASSERT(!page->pageOffset);
366 return (!page->numAllocatedSlots && page->freelistHead);
367 }
368
369 static bool ALWAYS_INLINE partitionPageStateIsDecommitted(const PartitionPage* page)
370 {
371 ASSERT(page != &PartitionRootGeneric::gSeedPage);
372 ASSERT(!page->pageOffset);
373 bool ret = (!page->numAllocatedSlots && !page->freelistHead);
374 if (ret) {
375 ASSERT(!page->numUnprovisionedSlots);
376 ASSERT(page->emptyCacheIndex == -1);
377 }
378 return ret;
379 }
380
381 static void partitionIncreaseCommittedPages(PartitionRootBase* root, size_t len)
382 {
383 root->totalSizeOfCommittedPages += len;
384 ASSERT(root->totalSizeOfCommittedPages <= root->totalSizeOfSuperPages + root->totalSizeOfDirectMappedPages);
385 }
386
387 static void partitionDecreaseCommittedPages(PartitionRootBase* root, size_t len)
388 {
389 root->totalSizeOfCommittedPages -= len;
390 ASSERT(root->totalSizeOfCommittedPages <= root->totalSizeOfSuperPages + root->totalSizeOfDirectMappedPages);
391 }
392
393 static ALWAYS_INLINE void partitionDecommitSystemPages(PartitionRootBase* root, void* addr, size_t len)
394 {
395 decommitSystemPages(addr, len);
396 partitionDecreaseCommittedPages(root, len);
397 }
398
399 static ALWAYS_INLINE void partitionRecommitSystemPages(PartitionRootBase* root, void* addr, size_t len)
400 {
401 recommitSystemPages(addr, len);
402 partitionIncreaseCommittedPages(root, len);
403 }
404
405 static ALWAYS_INLINE void* partitionAllocPartitionPages(PartitionRootBase* root, int flags, uint16_t numPartitionPages)
406 {
407 ASSERT(!(reinterpret_cast<uintptr_t>(root->nextPartitionPage) % kPartitionPageSize));
408 ASSERT(!(reinterpret_cast<uintptr_t>(root->nextPartitionPageEnd) % kPartitionPageSize));
409 ASSERT(numPartitionPages <= kNumPartitionPagesPerSuperPage);
410 size_t totalSize = kPartitionPageSize * numPartitionPages;
411 size_t numPartitionPagesLeft = (root->nextPartitionPageEnd - root->nextPartitionPage) >> kPartitionPageShift;
412 if (LIKELY(numPartitionPagesLeft >= numPartitionPages)) {
413 // In this case, we can still hand out pages from the current super page
414 // allocation.
415 char* ret = root->nextPartitionPage;
416 root->nextPartitionPage += totalSize;
417 partitionIncreaseCommittedPages(root, totalSize);
418 return ret;
419 }
420
421 // Need a new super page. We want to allocate super pages in a contiguous
422 // address region as much as possible. This is important for not causing
423 // page table bloat and not fragmenting address space on 32-bit architectures.
424 char* requestedAddress = root->nextSuperPage;
425 char* superPage = reinterpret_cast<char*>(allocPages(requestedAddress, kSuperPageSize, kSuperPageSize, PageAccessible));
426 if (UNLIKELY(!superPage))
427 return 0;
428
429 root->totalSizeOfSuperPages += kSuperPageSize;
430 partitionIncreaseCommittedPages(root, totalSize);
431
432 root->nextSuperPage = superPage + kSuperPageSize;
433 char* ret = superPage + kPartitionPageSize;
434 root->nextPartitionPage = ret + totalSize;
435 root->nextPartitionPageEnd = root->nextSuperPage - kPartitionPageSize;
436 // Make the first partition page in the super page a guard page, but leave a
437 // hole in the middle.
438 // This is where we put page metadata and also a tiny amount of extent
439 // metadata.
440 setSystemPagesInaccessible(superPage, kSystemPageSize);
441 setSystemPagesInaccessible(superPage + (kSystemPageSize * 2), kPartitionPageSize - (kSystemPageSize * 2));
442 // Also make the last partition page a guard page.
443 setSystemPagesInaccessible(superPage + (kSuperPageSize - kPartitionPageSize), kPartitionPageSize);
444
445 // If we were after a specific address, but didn't get it, assume that
446 // the system chose a lousy address. Here most OSes have a default
447 // algorithm that isn't randomized. For example, most Linux
448 // distributions will allocate the mapping directly before the last
449 // successful mapping, which is far from random. So we just get fresh
450 // randomness for the next mapping attempt.
451 if (requestedAddress && requestedAddress != superPage)
452 root->nextSuperPage = 0;
453
454 // We allocated a new super page so update super page metadata.
455 // First check if this is a new extent or not.
456 PartitionSuperPageExtentEntry* latestExtent = reinterpret_cast<PartitionSuperPageExtentEntry*>(partitionSuperPageToMetadataArea(superPage));
457 // By storing the root in every extent metadata object, we have a fast way
458 // to go from a pointer within the partition to the root object.
459 latestExtent->root = root;
460 // Most new extents will be part of a larger extent, and these three fields
461 // are unused, but we initialize them to 0 so that we get a clear signal
462 // in case they are accidentally used.
463 latestExtent->superPageBase = 0;
464 latestExtent->superPagesEnd = 0;
465 latestExtent->next = 0;
466
467 PartitionSuperPageExtentEntry* currentExtent = root->currentExtent;
468 bool isNewExtent = (superPage != requestedAddress);
469 if (UNLIKELY(isNewExtent)) {
470 if (UNLIKELY(!currentExtent)) {
471 ASSERT(!root->firstExtent);
472 root->firstExtent = latestExtent;
473 } else {
474 ASSERT(currentExtent->superPageBase);
475 currentExtent->next = latestExtent;
476 }
477 root->currentExtent = latestExtent;
478 latestExtent->superPageBase = superPage;
479 latestExtent->superPagesEnd = superPage + kSuperPageSize;
480 } else {
481 // We allocated next to an existing extent so just nudge the size up a little.
482 ASSERT(currentExtent->superPagesEnd);
483 currentExtent->superPagesEnd += kSuperPageSize;
484 ASSERT(ret >= currentExtent->superPageBase && ret < currentExtent->superPagesEnd);
485 }
486 return ret;
487 }
488
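// Computes the number of partition pages a slot span occupies, rounding up;
// e.g. a slot span of 5 system pages needs 2 partition pages when a partition
// page holds 4 system pages.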
489 static ALWAYS_INLINE uint16_t partitionBucketPartitionPages(const PartitionBucket* bucket)
490 {
491 return (bucket->numSystemPagesPerSlotSpan + (kNumSystemPagesPerPartitionPage - 1)) / kNumSystemPagesPerPartitionPage;
492 }
493
494 static ALWAYS_INLINE void partitionPageReset(PartitionPage* page)
495 {
496 ASSERT(partitionPageStateIsDecommitted(page));
497
498 page->numUnprovisionedSlots = partitionBucketSlots(page->bucket);
499 ASSERT(page->numUnprovisionedSlots);
500
501 page->nextPage = nullptr;
502 }
503
504 static ALWAYS_INLINE void partitionPageSetup(PartitionPage* page, PartitionBucket* bucket)
505 {
506 // The bucket never changes. We set it up once.
507 page->bucket = bucket;
508 page->emptyCacheIndex = -1;
509
510 partitionPageReset(page);
511
512 // If this page has just a single slot, do not set up page offsets for any
513 // page metadata other than the first one. This ensures that attempts to
514 // touch invalid page metadata fail.
515 if (page->numUnprovisionedSlots == 1)
516 return;
517
518 uint16_t numPartitionPages = partitionBucketPartitionPages(bucket);
519 char* pageCharPtr = reinterpret_cast<char*>(page);
520 for (uint16_t i = 1; i < numPartitionPages; ++i) {
521 pageCharPtr += kPageMetadataSize;
522 PartitionPage* secondaryPage = reinterpret_cast<PartitionPage*>(pageCharPtr);
523 secondaryPage->pageOffset = i;
524 }
525 }
526
527 static ALWAYS_INLINE char* partitionPageAllocAndFillFreelist(PartitionPage* page)
528 {
529 ASSERT(page != &PartitionRootGeneric::gSeedPage);
530 uint16_t numSlots = page->numUnprovisionedSlots;
531 ASSERT(numSlots);
532 PartitionBucket* bucket = page->bucket;
533 // We should only get here when _every_ slot is either used or unprovisioned.
534 // (The third state is "on the freelist". If we have a non-empty freelist, we should not get here.)
535 ASSERT(numSlots + page->numAllocatedSlots == partitionBucketSlots(bucket));
536 // Similarly, make explicitly sure that the freelist is empty.
537 ASSERT(!page->freelistHead);
538 ASSERT(page->numAllocatedSlots >= 0);
539
540 size_t size = bucket->slotSize;
541 char* base = reinterpret_cast<char*>(partitionPageToPointer(page));
542 char* returnObject = base + (size * page->numAllocatedSlots);
543 char* firstFreelistPointer = returnObject + size;
544 char* firstFreelistPointerExtent = firstFreelistPointer + sizeof(PartitionFreelistEntry*);
545 // Our goal is to fault as few system pages as possible. We calculate the
546 // page containing the "end" of the returned slot, and then allow freelist
547 // pointers to be written up to the end of that page.
548 char* subPageLimit = reinterpret_cast<char*>(WTF::roundUpToSystemPage(reinterpret_cast<size_t>(firstFreelistPointer)));
549 char* slotsLimit = returnObject + (size * numSlots);
550 char* freelistLimit = subPageLimit;
551 if (UNLIKELY(slotsLimit < freelistLimit))
552 freelistLimit = slotsLimit;
553
554 uint16_t numNewFreelistEntries = 0;
555 if (LIKELY(firstFreelistPointerExtent <= freelistLimit)) {
556 // Only consider used space in the slot span. If we consider wasted
557 // space, we may get an off-by-one when a freelist pointer fits in the
558 // wasted space, but a slot does not.
559 // We know we can fit at least one freelist pointer.
560 numNewFreelistEntries = 1;
561 // Any further entries require space for the whole slot span.
562 numNewFreelistEntries += static_cast<uint16_t>((freelistLimit - firstFreelistPointerExtent) / size);
563 }
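// Illustrative example (assuming 4 KiB system pages, slotSize == 1024, and a
// fresh 16-slot, page-aligned slot span): returnObject == base,
// firstFreelistPointer == base + 1024, freelistLimit == base + 4096, so
// numNewFreelistEntries == 1 + (4096 - 1032) / 1024 == 3 and freelist entries
// are provisioned at offsets 1024, 2048 and 3072 -- everything past the first
// system page stays unfaulted.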
564
565 // We always return an object slot -- that's the +1 below.
566 // We do not necessarily create any new freelist entries, because we cross sub page boundaries frequently for large bucket sizes.
567 ASSERT(numNewFreelistEntries + 1 <= numSlots);
568 numSlots -= (numNewFreelistEntries + 1);
569 page->numUnprovisionedSlots = numSlots;
570 page->numAllocatedSlots++;
571
572 if (LIKELY(numNewFreelistEntries)) {
573 char* freelistPointer = firstFreelistPointer;
574 PartitionFreelistEntry* entry = reinterpret_cast<PartitionFreelistEntry*>(freelistPointer);
575 page->freelistHead = entry;
576 while (--numNewFreelistEntries) {
577 freelistPointer += size;
578 PartitionFreelistEntry* nextEntry = reinterpret_cast<PartitionFreelistEntry*>(freelistPointer);
579 entry->next = partitionFreelistMask(nextEntry);
580 entry = nextEntry;
581 }
582 entry->next = partitionFreelistMask(0);
583 } else {
584 page->freelistHead = 0;
585 }
586 return returnObject;
587 }
588
589 // This helper function scans a bucket's active page list for a suitable new
590 // active page.
591 // When it finds a suitable new active page (one that has free slots and is not
592 // empty), it is set as the new active page. If there is no suitable new
593 // active page, the current active page is set to the seed page.
594 // As potential pages are scanned, they are tidied up according to their state.
595 // Empty pages are swept on to the empty page list, decommitted pages on to the
596 // decommitted page list and full pages are unlinked from any list.
597 static bool partitionSetNewActivePage(PartitionBucket* bucket)
598 {
599 PartitionPage* page = bucket->activePagesHead;
600 if (page == &PartitionRootBase::gSeedPage)
601 return false;
602
603 PartitionPage* nextPage;
604
605 for (; page; page = nextPage) {
606 nextPage = page->nextPage;
607 ASSERT(page->bucket == bucket);
608 ASSERT(page != bucket->emptyPagesHead);
609 ASSERT(page != bucket->decommittedPagesHead);
610
611 // Deal with empty and decommitted pages.
612 if (LIKELY(partitionPageStateIsActive(page))) {
613 // This page is usable because it has freelist entries, or has
614 // unprovisioned slots we can create freelist entries from.
615 bucket->activePagesHead = page;
616 return true;
617 }
618 if (LIKELY(partitionPageStateIsEmpty(page))) {
619 page->nextPage = bucket->emptyPagesHead;
620 bucket->emptyPagesHead = page;
621 } else if (LIKELY(partitionPageStateIsDecommitted(page))) {
622 page->nextPage = bucket->decommittedPagesHead;
623 bucket->decommittedPagesHead = page;
624 } else {
625 ASSERT(partitionPageStateIsFull(page));
626 // If we get here, we found a full page. Skip over it too, and also
627 // tag it as full (via a negative value). We need it tagged so that the
628 // free path can tell, and can move it back into the active page list.
629 page->numAllocatedSlots = -page->numAllocatedSlots;
630 ++bucket->numFullPages;
631 // numFullPages is a uint16_t for efficient packing so guard against
632 // overflow to be safe.
633 if (UNLIKELY(!bucket->numFullPages))
634 partitionBucketFull();
635 // Not necessary but might help stop accidents.
636 page->nextPage = 0;
637 }
638 }
639
640 bucket->activePagesHead = &PartitionRootGeneric::gSeedPage;
641 return false;
642 }
643
644 static ALWAYS_INLINE PartitionDirectMapExtent* partitionPageToDirectMapExtent(PartitionPage* page)
645 {
646 ASSERT(partitionBucketIsDirectMapped(page->bucket));
647 return reinterpret_cast<PartitionDirectMapExtent*>(reinterpret_cast<char*>(page) + 3 * kPageMetadataSize);
648 }
649
650 static ALWAYS_INLINE void partitionPageSetRawSize(PartitionPage* page, size_t size)
651 {
652 size_t* rawSizePtr = partitionPageGetRawSizePtr(page);
653 if (UNLIKELY(rawSizePtr != nullptr))
654 *rawSizePtr = size;
655 }
656
657 static ALWAYS_INLINE PartitionPage* partitionDirectMap(PartitionRootBase* root, int flags, size_t rawSize)
658 {
659 size_t size = partitionDirectMapSize(rawSize);
660
661 // Because we need to fake looking like a super page, we need to allocate
662 // a bunch of system pages more than "size":
663 // - The first few system pages are the partition page in which the super
664 // page metadata is stored. We fault just one system page out of a partition
665 // page sized clump.
666 // - We add a trailing guard page on 32-bit (on 64-bit we rely on the
667 // massive address space plus randomization instead).
668 size_t mapSize = size + kPartitionPageSize;
669 #if !CPU(64BIT)
670 mapSize += kSystemPageSize;
671 #endif
672 // Round up to the allocation granularity.
673 mapSize += kPageAllocationGranularityOffsetMask;
674 mapSize &= kPageAllocationGranularityBaseMask;
675
676 // TODO: these pages will be zero-filled. Consider internalizing an
677 // allocZeroed() API so we can avoid a memset() entirely in this case.
678 char* ptr = reinterpret_cast<char*>(allocPages(0, mapSize, kSuperPageSize, PageAccessible));
679 if (UNLIKELY(!ptr))
680 return nullptr;
681
682 size_t committedPageSize = size + kSystemPageSize;
683 root->totalSizeOfDirectMappedPages += committedPageSize;
684 partitionIncreaseCommittedPages(root, committedPageSize);
685
686 char* slot = ptr + kPartitionPageSize;
687 setSystemPagesInaccessible(ptr + (kSystemPageSize * 2), kPartitionPageSize - (kSystemPageSize * 2));
688 #if !CPU(64BIT)
689 setSystemPagesInaccessible(ptr, kSystemPageSize);
690 setSystemPagesInaccessible(slot + size, kSystemPageSize);
691 #endif
692
693 PartitionSuperPageExtentEntry* extent = reinterpret_cast<PartitionSuperPageExtentEntry*>(partitionSuperPageToMetadataArea(ptr));
694 extent->root = root;
695 // The new structures are all located inside a fresh system page so they
696 // will all be zeroed out. These ASSERTs are for documentation.
697 ASSERT(!extent->superPageBase);
698 ASSERT(!extent->superPagesEnd);
699 ASSERT(!extent->next);
700 PartitionPage* page = partitionPointerToPageNoAlignmentCheck(slot);
701 PartitionBucket* bucket = reinterpret_cast<PartitionBucket*>(reinterpret_cast<char*>(page) + (kPageMetadataSize * 2));
702 ASSERT(!page->nextPage);
703 ASSERT(!page->numAllocatedSlots);
704 ASSERT(!page->numUnprovisionedSlots);
705 ASSERT(!page->pageOffset);
706 ASSERT(!page->emptyCacheIndex);
707 page->bucket = bucket;
708 page->freelistHead = reinterpret_cast<PartitionFreelistEntry*>(slot);
709 PartitionFreelistEntry* nextEntry = reinterpret_cast<PartitionFreelistEntry*>(slot);
710 nextEntry->next = partitionFreelistMask(0);
711
712 ASSERT(!bucket->activePagesHead);
713 ASSERT(!bucket->emptyPagesHead);
714 ASSERT(!bucket->decommittedPagesHead);
715 ASSERT(!bucket->numSystemPagesPerSlotSpan);
716 ASSERT(!bucket->numFullPages);
717 bucket->slotSize = size;
718
719 PartitionDirectMapExtent* mapExtent = partitionPageToDirectMapExtent(page);
720 mapExtent->mapSize = mapSize - kPartitionPageSize - kSystemPageSize;
721 mapExtent->bucket = bucket;
722
723 // Maintain the doubly-linked list of all direct mappings.
724 mapExtent->nextExtent = root->directMapList;
725 if (mapExtent->nextExtent)
726 mapExtent->nextExtent->prevExtent = mapExtent;
727 mapExtent->prevExtent = nullptr;
728 root->directMapList = mapExtent;
729
730 return page;
731 }
732
733 static ALWAYS_INLINE void partitionDirectUnmap(PartitionPage* page)
734 {
735 PartitionRootBase* root = partitionPageToRoot(page);
736 const PartitionDirectMapExtent* extent = partitionPageToDirectMapExtent(page);
737 size_t unmapSize = extent->mapSize;
738
739 // Maintain the doubly-linked list of all direct mappings.
740 if (extent->prevExtent) {
741 ASSERT(extent->prevExtent->nextExtent == extent);
742 extent->prevExtent->nextExtent = extent->nextExtent;
743 } else {
744 root->directMapList = extent->nextExtent;
745 }
746 if (extent->nextExtent) {
747 ASSERT(extent->nextExtent->prevExtent == extent);
748 extent->nextExtent->prevExtent = extent->prevExtent;
749 }
750
751 // Add on the size of the trailing guard page and preceding partition
752 // page.
753 unmapSize += kPartitionPageSize + kSystemPageSize;
754
755 size_t uncommittedPageSize = page->bucket->slotSize + kSystemPageSize;
756 partitionDecreaseCommittedPages(root, uncommittedPageSize);
757 ASSERT(root->totalSizeOfDirectMappedPages >= uncommittedPageSize);
758 root->totalSizeOfDirectMappedPages -= uncommittedPageSize;
759
760 ASSERT(!(unmapSize & kPageAllocationGranularityOffsetMask));
761
762 char* ptr = reinterpret_cast<char*>(partitionPageToPointer(page));
763 // Account for the mapping starting a partition page before the actual
764 // allocation address.
765 ptr -= kPartitionPageSize;
766
767 freePages(ptr, unmapSize);
768 }
769
770 void* partitionAllocSlowPath(PartitionRootBase* root, int flags, size_t size, PartitionBucket* bucket)
771 {
772 // The slow path is called when the freelist is empty.
773 ASSERT(!bucket->activePagesHead->freelistHead);
774
775 PartitionPage* newPage = nullptr;
776
777 // For the partitionAllocGeneric API, we have a bunch of buckets marked
778 // as special cases. We bounce them through to the slow path so that we
779 // can still have a blazing fast hot path due to lack of corner-case
780 // branches.
781 bool returnNull = flags & PartitionAllocReturnNull;
782 if (UNLIKELY(partitionBucketIsDirectMapped(bucket))) {
783 ASSERT(size > kGenericMaxBucketed);
784 ASSERT(bucket == &PartitionRootBase::gPagedBucket);
785 ASSERT(bucket->activePagesHead == &PartitionRootGeneric::gSeedPage);
786 if (size > kGenericMaxDirectMapped) {
787 if (returnNull)
788 return nullptr;
789 partitionExcessiveAllocationSize();
790 }
791 newPage = partitionDirectMap(root, flags, size);
792 } else if (LIKELY(partitionSetNewActivePage(bucket))) {
793 // First, did we find an active page in the active pages list?
794 newPage = bucket->activePagesHead;
795 ASSERT(partitionPageStateIsActive(newPage));
796 } else if (LIKELY(bucket->emptyPagesHead != nullptr) || LIKELY(bucket->decommittedPagesHead != nullptr)) {
797 // Second, look in our lists of empty and decommitted pages.
798 // Check empty pages first, which are preferred, but beware that an
799 // empty page might have been decommitted.
800 while (LIKELY((newPage = bucket->emptyPagesHead) != nullptr)) {
801 ASSERT(newPage->bucket == bucket);
802 ASSERT(partitionPageStateIsEmpty(newPage) || partitionPageStateIsDecommitted(newPage));
803 bucket->emptyPagesHead = newPage->nextPage;
804 // Accept the empty page unless it got decommitted.
805 if (newPage->freelistHead) {
806 newPage->nextPage = nullptr;
807 break;
808 }
809 ASSERT(partitionPageStateIsDecommitted(newPage));
810 newPage->nextPage = bucket->decommittedPagesHead;
811 bucket->decommittedPagesHead = newPage;
812 }
813 if (UNLIKELY(!newPage) && LIKELY(bucket->decommittedPagesHead != nullptr)) {
814 newPage = bucket->decommittedPagesHead;
815 ASSERT(newPage->bucket == bucket);
816 ASSERT(partitionPageStateIsDecommitted(newPage));
817 bucket->decommittedPagesHead = newPage->nextPage;
818 void* addr = partitionPageToPointer(newPage);
819 partitionRecommitSystemPages(root, addr, partitionBucketBytes(newPage->bucket));
820 partitionPageReset(newPage);
821 }
822 ASSERT(newPage);
823 } else {
824 // Third. If we get here, we need a brand new page.
825 uint16_t numPartitionPages = partitionBucketPartitionPages(bucket);
826 void* rawPages = partitionAllocPartitionPages(root, flags, numPartitionPages);
827 if (LIKELY(rawPages != nullptr)) {
828 newPage = partitionPointerToPageNoAlignmentCheck(rawPages);
829 partitionPageSetup(newPage, bucket);
830 }
831 }
832
833 // Bail if we had a memory allocation failure.
834 if (UNLIKELY(!newPage)) {
835 ASSERT(bucket->activePagesHead == &PartitionRootGeneric::gSeedPage);
836 if (returnNull)
837 return nullptr;
838 partitionOutOfMemory(root);
839 }
840
841 bucket = newPage->bucket;
842 ASSERT(bucket != &PartitionRootBase::gPagedBucket);
843 bucket->activePagesHead = newPage;
844 partitionPageSetRawSize(newPage, size);
845
846 // If we found an active page with free slots, or an empty page, we have a
847 // usable freelist head.
848 if (LIKELY(newPage->freelistHead != nullptr)) {
849 PartitionFreelistEntry* entry = newPage->freelistHead;
850 PartitionFreelistEntry* newHead = partitionFreelistMask(entry->next);
851 newPage->freelistHead = newHead;
852 newPage->numAllocatedSlots++;
853 return entry;
854 }
855 // Otherwise, we need to build the freelist.
856 ASSERT(newPage->numUnprovisionedSlots);
857 return partitionPageAllocAndFillFreelist(newPage);
858 }
859
860 static ALWAYS_INLINE void partitionDecommitPage(PartitionRootBase* root, PartitionPage* page)
861 {
862 ASSERT(partitionPageStateIsEmpty(page));
863 ASSERT(!partitionBucketIsDirectMapped(page->bucket));
864 void* addr = partitionPageToPointer(page);
865 partitionDecommitSystemPages(root, addr, partitionBucketBytes(page->bucket));
866
867 // We actually leave the decommitted page in the active list. We'll sweep
868 // it on to the decommitted page list when we next walk the active page
869 // list.
870 // Pulling this trick enables us to use a singly-linked page list for all
871 // cases, which is critical in keeping the page metadata structure down to
872 // 32 bytes in size.
873 page->freelistHead = 0;
874 page->numUnprovisionedSlots = 0;
875 ASSERT(partitionPageStateIsDecommitted(page));
876 }
877
878 static void partitionDecommitPageIfPossible(PartitionRootBase* root, PartitionPage* page)
879 {
880 ASSERT(page->emptyCacheIndex >= 0);
881 ASSERT(static_cast<unsigned>(page->emptyCacheIndex) < kMaxFreeableSpans);
882 ASSERT(page == root->globalEmptyPageRing[page->emptyCacheIndex]);
883 page->emptyCacheIndex = -1;
884 if (partitionPageStateIsEmpty(page))
885 partitionDecommitPage(root, page);
886 }
887
888 static ALWAYS_INLINE void partitionRegisterEmptyPage(PartitionPage* page)
889 {
890 ASSERT(partitionPageStateIsEmpty(page));
891 PartitionRootBase* root = partitionPageToRoot(page);
892
893 // If the page is already registered as empty, give it another life.
894 if (page->emptyCacheIndex != -1) {
895 ASSERT(page->emptyCacheIndex >= 0);
896 ASSERT(static_cast<unsigned>(page->emptyCacheIndex) < kMaxFreeableSpans);
897 ASSERT(root->globalEmptyPageRing[page->emptyCacheIndex] == page);
898 root->globalEmptyPageRing[page->emptyCacheIndex] = 0;
899 }
900
901 int16_t currentIndex = root->globalEmptyPageRingIndex;
902 PartitionPage* pageToDecommit = root->globalEmptyPageRing[currentIndex];
903 // The page might well have been re-activated, filled up, etc. before we get
904 // around to looking at it here.
905 if (pageToDecommit)
906 partitionDecommitPageIfPossible(root, pageToDecommit);
907
908 // We put the empty slot span on our global list of "pages that were once
909 // empty", thus providing it a bit of breathing room to get re-used before
910 // we really free it. This improves performance, particularly on Mac OS X
911 // which has subpar memory management performance.
912 root->globalEmptyPageRing[currentIndex] = page;
913 page->emptyCacheIndex = currentIndex;
914 ++currentIndex;
915 if (currentIndex == kMaxFreeableSpans)
916 currentIndex = 0;
917 root->globalEmptyPageRingIndex = currentIndex;
918 }
919
920 static void partitionDecommitEmptyPages(PartitionRootBase* root)
921 {
922 for (size_t i = 0; i < kMaxFreeableSpans; ++i) {
923 PartitionPage* page = root->globalEmptyPageRing[i];
924 if (page)
925 partitionDecommitPageIfPossible(root, page);
926 root->globalEmptyPageRing[i] = nullptr;
927 }
928 }
929
930 void partitionFreeSlowPath(PartitionPage* page)
931 {
932 PartitionBucket* bucket = page->bucket;
933 ASSERT(page != &PartitionRootGeneric::gSeedPage);
934 if (LIKELY(page->numAllocatedSlots == 0)) {
935 // Page became fully unused.
936 if (UNLIKELY(partitionBucketIsDirectMapped(bucket))) {
937 partitionDirectUnmap(page);
938 return;
939 }
940 // If it's the current active page, change it. We bounce the page to
941 // the empty list as a force towards defragmentation.
942 if (LIKELY(page == bucket->activePagesHead))
943 (void) partitionSetNewActivePage(bucket);
944 ASSERT(bucket->activePagesHead != page);
945
946 partitionPageSetRawSize(page, 0);
947 ASSERT(!partitionPageGetRawSize(page));
948
949 partitionRegisterEmptyPage(page);
950 } else {
951 ASSERT(!partitionBucketIsDirectMapped(bucket));
952 // Ensure that the page is full. That's the only valid case if we
953 // arrive here.
954 ASSERT(page->numAllocatedSlots < 0);
955 // A transition of numAllocatedSlots from 0 to -1 is not legal, and
956 // likely indicates a double-free.
957 SECURITY_CHECK(page->numAllocatedSlots != -1);
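// Illustrative trace: a full page of N slots left the active list tagged
// with numAllocatedSlots == -N; the inline free path has since decremented
// it to -(N + 1), so negating and subtracting 2 below yields N - 1, i.e.
// every slot allocated except the one just freed (see the ASSERT below).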
958 page->numAllocatedSlots = -page->numAllocatedSlots - 2;
959 ASSERT(page->numAllocatedSlots == partitionBucketSlots(bucket) - 1);
960 // Fully used page became partially used. It must be put back on the
961 // non-full page list. Also make it the current page to increase the
962 // chances of it being filled up again. The old current page will be
963 // the next page.
964 ASSERT(!page->nextPage);
965 if (LIKELY(bucket->activePagesHead != &PartitionRootGeneric::gSeedPage))
966 page->nextPage = bucket->activePagesHead;
967 bucket->activePagesHead = page;
968 --bucket->numFullPages;
969 // Special case: for a partition page with just a single slot, it may
970 // now be empty and we want to run it through the empty logic.
971 if (UNLIKELY(page->numAllocatedSlots == 0))
972 partitionFreeSlowPath(page);
973 }
974 }
975
976 bool partitionReallocDirectMappedInPlace(PartitionRootGeneric* root, PartitionPage* page, size_t rawSize)
977 {
978 ASSERT(partitionBucketIsDirectMapped(page->bucket));
979
980 rawSize = partitionCookieSizeAdjustAdd(rawSize);
981
982 // Note that the new size might be a bucketed size; this function is called
983 // whenever we're reallocating a direct mapped allocation.
984 size_t newSize = partitionDirectMapSize(rawSize);
985 if (newSize < kGenericMinDirectMappedDownsize)
986 return false;
987
988 // bucket->slotSize is the current size of the allocation.
989 size_t currentSize = page->bucket->slotSize;
990 if (newSize == currentSize)
991 return true;
992
993 char* charPtr = static_cast<char*>(partitionPageToPointer(page));
994
995 if (newSize < currentSize) {
996 size_t mapSize = partitionPageToDirectMapExtent(page)->mapSize;
997
998 // Don't reallocate in-place if the new size is less than 80% of the full
999 // map size, to avoid holding on to too much unused address space; e.g. a 100-system-page mapping is shrunk in place only if the new size still needs at least 80 of those pages.
1000 if ((newSize / kSystemPageSize) * 5 < (mapSize / kSystemPageSize) * 4)
1001 return false;
1002
1003 // Shrink by decommitting unneeded pages and making them inaccessible.
1004 size_t decommitSize = currentSize - newSize;
1005 partitionDecommitSystemPages(root, charPtr + newSize, decommitSize);
1006 setSystemPagesInaccessible(charPtr + newSize, decommitSize);
1007 } else if (newSize <= partitionPageToDirectMapExtent(page)->mapSize) {
1008 // Grow within the actually allocated memory. Just need to make the
1009 // pages accessible again.
1010 size_t recommitSize = newSize - currentSize;
1011 bool ret = setSystemPagesAccessible(charPtr + currentSize, recommitSize);
1012 RELEASE_ASSERT(ret);
1013 partitionRecommitSystemPages(root, charPtr + currentSize, recommitSize);
1014
1015 #if ENABLE(ASSERT)
1016 memset(charPtr + currentSize, kUninitializedByte, recommitSize);
1017 #endif
1018 } else {
1019 // We can't perform the realloc in-place.
1020 // TODO: support this too when possible.
1021 return false;
1022 }
1023
1024 #if ENABLE(ASSERT)
1025 // Write a new trailing cookie.
1026 partitionCookieWriteValue(charPtr + rawSize - kCookieSize);
1027 #endif
1028
1029 partitionPageSetRawSize(page, rawSize);
1030 ASSERT(partitionPageGetRawSize(page) == rawSize);
1031
1032 page->bucket->slotSize = newSize;
1033 return true;
1034 }
1035
1036 void* partitionReallocGeneric(PartitionRootGeneric* root, void* ptr, size_t newSize, const char* typeName)
1037 {
1038 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
1039 return realloc(ptr, newSize);
1040 #else
1041 if (UNLIKELY(!ptr))
1042 return partitionAllocGeneric(root, newSize, typeName);
1043 if (UNLIKELY(!newSize)) {
1044 partitionFreeGeneric(root, ptr);
1045 return 0;
1046 }
1047
1048 if (newSize > kGenericMaxDirectMapped)
1049 partitionExcessiveAllocationSize();
1050
1051 ASSERT(partitionPointerIsValid(partitionCookieFreePointerAdjust(ptr)));
1052
1053 PartitionPage* page = partitionPointerToPage(partitionCookieFreePointerAdjust(ptr));
1054
1055 if (UNLIKELY(partitionBucketIsDirectMapped(page->bucket))) {
1056 // We may be able to perform the realloc in place by changing the
1057 // accessibility of memory pages and, if reducing the size, decommitting
1058 // them.
1059 if (partitionReallocDirectMappedInPlace(root, page, newSize)) {
1060 PartitionAllocHooks::reallocHookIfEnabled(ptr, ptr, newSize, typeName);
1061 return ptr;
1062 }
1063 }
1064
1065 size_t actualNewSize = partitionAllocActualSize(root, newSize);
1066 size_t actualOldSize = partitionAllocGetSize(ptr);
1067
1068 // TODO: note that tcmalloc will "ignore" a downsizing realloc() unless the
1069 // new size is a significant percentage smaller. We could do the same if we
1070 // determine it is a win.
1071 if (actualNewSize == actualOldSize) {
1072 // Trying to allocate a block of size newSize would give us a block of
1073 // the same size as the one we've already got, so no point in doing
1074 // anything here.
1075 return ptr;
1076 }
1077
1078 // This realloc cannot be resized in-place. Sadness.
1079 void* ret = partitionAllocGeneric(root, newSize, typeName);
1080 size_t copySize = actualOldSize;
1081 if (newSize < copySize)
1082 copySize = newSize;
1083
1084 memcpy(ret, ptr, copySize);
1085 partitionFreeGeneric(root, ptr);
1086 return ret;
1087 #endif
1088 }
1089
1090 static size_t partitionPurgePage(PartitionPage* page, bool discard)
1091 {
1092 const PartitionBucket* bucket = page->bucket;
1093 size_t slotSize = bucket->slotSize;
1094 if (slotSize < kSystemPageSize || !page->numAllocatedSlots)
1095 return 0;
1096
1097 size_t bucketNumSlots = partitionBucketSlots(bucket);
1098 size_t discardableBytes = 0;
1099
1100 size_t rawSize = partitionPageGetRawSize(const_cast<PartitionPage*>(page));
1101 if (rawSize) {
1102 uint32_t usedBytes = static_cast<uint32_t>(WTF::roundUpToSystemPage(rawSize));
1103 discardableBytes = bucket->slotSize - usedBytes;
1104 if (discardableBytes && discard) {
1105 char* ptr = reinterpret_cast<char*>(partitionPageToPointer(page));
1106 ptr += usedBytes;
1107 discardSystemPages(ptr, discardableBytes);
1108 }
1109 return discardableBytes;
1110 }
1111
1112 const size_t maxSlotCount = (kPartitionPageSize * kMaxPartitionPagesPerSlotSpan) / kSystemPageSize;
1113 ASSERT(bucketNumSlots <= maxSlotCount);
1114 ASSERT(page->numUnprovisionedSlots < bucketNumSlots);
1115 size_t numSlots = bucketNumSlots - page->numUnprovisionedSlots;
1116 char slotUsage[maxSlotCount];
1117 size_t lastSlot = static_cast<size_t>(-1);
1118 memset(slotUsage, 1, numSlots);
1119 char* ptr = reinterpret_cast<char*>(partitionPageToPointer(page));
1120 PartitionFreelistEntry* entry = page->freelistHead;
1121 // First, walk the freelist for this page and make a bitmap of which slots
1122 // are not in use.
1123 while (entry) {
1124 size_t slotIndex = (reinterpret_cast<char*>(entry) - ptr) / slotSize;
1125 ASSERT(slotIndex < numSlots);
1126 slotUsage[slotIndex] = 0;
1127 entry = partitionFreelistMask(entry->next);
1128 // If we have a slot where the masked freelist entry is 0, we can
1129 // actually discard that freelist entry because touching a discarded
1130 // page is guaranteed to return original content or 0.
1131 // (Note that this optimization won't fire on big endian machines
1132 // because the masking function is negation.)
1133 if (!partitionFreelistMask(entry))
1134 lastSlot = slotIndex;
1135 }
1136
1137 // If the slot(s) at the end of the slot span are not in use, we can
1138 // truncate them entirely and rewrite the freelist.
1139 size_t truncatedSlots = 0;
1140 while (!slotUsage[numSlots - 1]) {
1141 truncatedSlots++;
1142 numSlots--;
1143 ASSERT(numSlots);
1144 }
1145 // First, do the work of calculating the discardable bytes. Don't actually
1146 // discard anything unless the discard flag was passed in.
1147 char* beginPtr = nullptr;
1148 char* endPtr = nullptr;
1149 size_t unprovisionedBytes = 0;
1150 if (truncatedSlots) {
1151 beginPtr = ptr + (numSlots * slotSize);
1152 endPtr = beginPtr + (slotSize * truncatedSlots);
1153 beginPtr = reinterpret_cast<char*>(WTF::roundUpToSystemPage(reinterpret_cast<size_t>(beginPtr)));
1154 // We round the end pointer here up and not down because we're at the
1155 // end of a slot span, so we "own" all the way up the page boundary.
1156 endPtr = reinterpret_cast<char*>(WTF::roundUpToSystemPage(reinterpret_cast<size_t>(endPtr)));
1157 ASSERT(endPtr <= ptr + partitionBucketBytes(bucket));
1158 if (beginPtr < endPtr) {
1159 unprovisionedBytes = endPtr - beginPtr;
1160 discardableBytes += unprovisionedBytes;
1161 }
1162 }
1163 if (unprovisionedBytes && discard) {
1164 ASSERT(truncatedSlots > 0);
1165 size_t numNewEntries = 0;
1166 page->numUnprovisionedSlots += static_cast<uint16_t>(truncatedSlots);
1167 // Rewrite the freelist.
1168 PartitionFreelistEntry** entryPtr = &page->freelistHead;
1169 for (size_t slotIndex = 0; slotIndex < numSlots; ++slotIndex) {
1170 if (slotUsage[slotIndex])
1171 continue;
1172 PartitionFreelistEntry* entry = reinterpret_cast<PartitionFreelistEntry*>(ptr + (slotSize * slotIndex));
1173 *entryPtr = partitionFreelistMask(entry);
1174 entryPtr = reinterpret_cast<PartitionFreelistEntry**>(entry);
1175 numNewEntries++;
1176 }
1177 // Terminate the freelist chain.
1178 *entryPtr = nullptr;
1179 // The freelist head is stored unmasked.
1180 page->freelistHead = partitionFreelistMask(page->freelistHead);
1181 ASSERT(numNewEntries == numSlots - page->numAllocatedSlots);
1182 // Discard the memory.
1183 discardSystemPages(beginPtr, unprovisionedBytes);
1184 }
1185
1186 // Next, walk the slots and for any not in use, consider where the system
1187 // page boundaries occur. We can release any system pages back to the
1188 // system as long as we don't interfere with a freelist pointer or an
1189 // adjacent slot.
1190 for (size_t i = 0; i < numSlots; ++i) {
1191 if (slotUsage[i])
1192 continue;
1193 // The first address we can safely discard is just after the freelist
1194 // pointer. There's one quirk: if the freelist pointer is actually a
1195 // null, we can discard that pointer value too.
1196 char* beginPtr = ptr + (i * slotSize);
1197 char* endPtr = beginPtr + slotSize;
1198 if (i != lastSlot)
1199 beginPtr += sizeof(PartitionFreelistEntry);
1200 beginPtr = reinterpret_cast<char*>(WTF::roundUpToSystemPage(reinterpret_cast<size_t>(beginPtr)));
1201 endPtr = reinterpret_cast<char*>(WTF::roundDownToSystemPage(reinterpret_cast<size_t>(endPtr)));
1202 if (beginPtr < endPtr) {
1203 size_t partialSlotBytes = endPtr - beginPtr;
1204 discardableBytes += partialSlotBytes;
1205 if (discard)
1206 discardSystemPages(beginPtr, partialSlotBytes);
1207 }
1208 }
1209 return discardableBytes;
1210 }
1211
1212 static void partitionPurgeBucket(PartitionBucket* bucket)
1213 {
1214 if (bucket->activePagesHead != &PartitionRootGeneric::gSeedPage) {
1215 for (PartitionPage* page = bucket->activePagesHead; page; page = page->nextPage) {
1216 ASSERT(page != &PartitionRootGeneric::gSeedPage);
1217 (void) partitionPurgePage(page, true);
1218 }
1219 }
1220 }
1221
1222 void partitionPurgeMemory(PartitionRoot* root, int flags)
1223 {
1224 if (flags & PartitionPurgeDecommitEmptyPages)
1225 partitionDecommitEmptyPages(root);
1226 // We don't currently do anything for PartitionPurgeDiscardUnusedSystemPages
1227 // here because that flag is only useful for allocations >= system page
1228 // size. We only have allocations that large inside generic partitions
1229 // at the moment.
1230 }
1231
1232 void partitionPurgeMemoryGeneric(PartitionRootGeneric* root, int flags)
1233 {
1234 SpinLock::Guard guard(root->lock);
1235 if (flags & PartitionPurgeDecommitEmptyPages)
1236 partitionDecommitEmptyPages(root);
1237 if (flags & PartitionPurgeDiscardUnusedSystemPages) {
1238 for (size_t i = 0; i < kGenericNumBuckets; ++i) {
1239 PartitionBucket* bucket = &root->buckets[i];
1240 if (bucket->slotSize >= kSystemPageSize)
1241 partitionPurgeBucket(bucket);
1242 }
1243 }
1244 }
1245
1246 static void partitionDumpPageStats(PartitionBucketMemoryStats* statsOut, const PartitionPage* page)
1247 {
1248 uint16_t bucketNumSlots = partitionBucketSlots(page->bucket);
1249
1250 if (partitionPageStateIsDecommitted(page)) {
1251 ++statsOut->numDecommittedPages;
1252 return;
1253 }
1254
1255 statsOut->discardableBytes += partitionPurgePage(const_cast<PartitionPage*>(page), false);
1256
1257 size_t rawSize = partitionPageGetRawSize(const_cast<PartitionPage*>(page));
1258 if (rawSize)
1259 statsOut->activeBytes += static_cast<uint32_t>(rawSize);
1260 else
1261 statsOut->activeBytes += (page->numAllocatedSlots * statsOut->bucketSlotSize);
1262
1263 size_t pageBytesResident = WTF::roundUpToSystemPage((bucketNumSlots - page->numUnprovisionedSlots) * statsOut->bucketSlotSize);
1264 statsOut->residentBytes += pageBytesResident;
1265 if (partitionPageStateIsEmpty(page)) {
1266 statsOut->decommittableBytes += pageBytesResident;
1267 ++statsOut->numEmptyPages;
1268 } else if (partitionPageStateIsFull(page)) {
1269 ++statsOut->numFullPages;
1270 } else {
1271 ASSERT(partitionPageStateIsActive(page));
1272 ++statsOut->numActivePages;
1273 }
1274 }
1275
1276 static void partitionDumpBucketStats(PartitionBucketMemoryStats* statsOut, const PartitionBucket* bucket)
1277 {
1278 ASSERT(!partitionBucketIsDirectMapped(bucket));
1279 statsOut->isValid = false;
1280 // If the active page list is empty (== &PartitionRootGeneric::gSeedPage),
1281 // the bucket might still need to be reported if it has a list of empty,
1282 // decommitted or full pages.
1283 if (bucket->activePagesHead == &PartitionRootGeneric::gSeedPage && !bucket->emptyPagesHead && !bucket->decommittedPagesHead && !bucket->numFullPages)
1284 return;
1285
1286 memset(statsOut, '\0', sizeof(*statsOut));
1287 statsOut->isValid = true;
1288 statsOut->isDirectMap = false;
1289 statsOut->numFullPages = static_cast<size_t>(bucket->numFullPages);
1290 statsOut->bucketSlotSize = bucket->slotSize;
1291 uint16_t bucketNumSlots = partitionBucketSlots(bucket);
1292 size_t bucketUsefulStorage = statsOut->bucketSlotSize * bucketNumSlots;
1293 statsOut->allocatedPageSize = partitionBucketBytes(bucket);
1294 statsOut->activeBytes = bucket->numFullPages * bucketUsefulStorage;
1295 statsOut->residentBytes = bucket->numFullPages * statsOut->allocatedPageSize;
1296
1297 for (const PartitionPage* page = bucket->emptyPagesHead; page; page = page->nextPage) {
1298 ASSERT(partitionPageStateIsEmpty(page) || partitionPageStateIsDecommitted(page));
1299 partitionDumpPageStats(statsOut, page);
1300 }
1301 for (const PartitionPage* page = bucket->decommittedPagesHead; page; page = page->nextPage) {
1302 ASSERT(partitionPageStateIsDecommitted(page));
1303 partitionDumpPageStats(statsOut, page);
1304 }
1305
1306 if (bucket->activePagesHead != &PartitionRootGeneric::gSeedPage) {
1307 for (const PartitionPage* page = bucket->activePagesHead; page; page = page->nextPage) {
1308 ASSERT(page != &PartitionRootGeneric::gSeedPage);
1309 partitionDumpPageStats(statsOut, page);
1310 }
1311 }
1312 }
1313
1314 void partitionDumpStatsGeneric(PartitionRootGeneric* partition, const char* partitionName, bool isLightDump, PartitionStatsDumper* partitionStatsDumper)
1315 {
1316 PartitionBucketMemoryStats bucketStats[kGenericNumBuckets];
1317 static const size_t kMaxReportableDirectMaps = 4096;
1318 uint32_t directMapLengths[kMaxReportableDirectMaps];
1319 size_t numDirectMappedAllocations = 0;
1320
1321 {
1322 SpinLock::Guard guard(partition->lock);
1323
1324 for (size_t i = 0; i < kGenericNumBuckets; ++i) {
1325 const PartitionBucket* bucket = &partition->buckets[i];
1326 // Don't report the pseudo buckets that the generic allocator sets up in
1327 // order to preserve a fast size->bucket map (see
1328 // partitionAllocGenericInit for details).
1329 if (!bucket->activePagesHead)
1330 bucketStats[i].isValid = false;
1331 else
1332 partitionDumpBucketStats(&bucketStats[i], bucket);
1333 }
1334
1335 for (PartitionDirectMapExtent* extent = partition->directMapList; extent; extent = extent->nextExtent) {
1336 ASSERT(!extent->nextExtent || extent->nextExtent->prevExtent == extent);
1337 directMapLengths[numDirectMappedAllocations] = extent->bucket->slotSize;
1338 ++numDirectMappedAllocations;
1339 if (numDirectMappedAllocations == kMaxReportableDirectMaps)
1340 break;
1341 }
1342 }
1343
1344 // partitionsDumpBucketStats is called after collecting stats because it
1345 // can try to allocate using PartitionAllocGeneric and it can't obtain the
1346 // lock.
1347 PartitionMemoryStats partitionStats = { 0 };
1348 partitionStats.totalMmappedBytes = partition->totalSizeOfSuperPages + partition->totalSizeOfDirectMappedPages;
1349 partitionStats.totalCommittedBytes = partition->totalSizeOfCommittedPages;
1350 for (size_t i = 0; i < kGenericNumBuckets; ++i) {
1351 if (bucketStats[i].isValid) {
1352 partitionStats.totalResidentBytes += bucketStats[i].residentBytes;
1353 partitionStats.totalActiveBytes += bucketStats[i].activeBytes;
1354 partitionStats.totalDecommittableBytes += bucketStats[i].decommittableBytes;
1355 partitionStats.totalDiscardableBytes += bucketStats[i].discardableBytes;
1356 if (!isLightDump)
1357 partitionStatsDumper->partitionsDumpBucketStats(partitionName, &bucketStats[i]);
1358 }
1359 }
1360
1361 size_t directMappedAllocationsTotalSize = 0;
1362 for (size_t i = 0; i < numDirectMappedAllocations; ++i) {
1363 PartitionBucketMemoryStats stats;
1364 memset(&stats, '\0', sizeof(stats));
1365 stats.isValid = true;
1366 stats.isDirectMap = true;
1367 stats.numFullPages = 1;
1368 uint32_t size = directMapLengths[i];
1369 stats.allocatedPageSize = size;
1370 stats.bucketSlotSize = size;
1371 stats.activeBytes = size;
1372 stats.residentBytes = size;
1373 directMappedAllocationsTotalSize += size;
1374 partitionStatsDumper->partitionsDumpBucketStats(partitionName, &stats);
1375 }
1376 partitionStats.totalResidentBytes += directMappedAllocationsTotalSize;
1377 partitionStats.totalActiveBytes += directMappedAllocationsTotalSize;
1378 partitionStatsDumper->partitionDumpTotals(partitionName, &partitionStats);
1379 }
1380
1381 void partitionDumpStats(PartitionRoot* partition, const char* partitionName, bool isLightDump, PartitionStatsDumper* partitionStatsDumper)
1382 {
1383 static const size_t kMaxReportableBuckets = 4096 / sizeof(void*);
1384 PartitionBucketMemoryStats memoryStats[kMaxReportableBuckets];
1385 const size_t partitionNumBuckets = partition->numBuckets;
1386 ASSERT(partitionNumBuckets <= kMaxReportableBuckets);
1387
1388 for (size_t i = 0; i < partitionNumBuckets; ++i)
1389 partitionDumpBucketStats(&memoryStats[i], &partition->buckets()[i]);
1390
1391 // partitionsDumpBucketStats is called after collecting stats because it
1392 // can use PartitionAlloc to allocate and this can affect the statistics.
1393 PartitionMemoryStats partitionStats = { 0 };
1394 partitionStats.totalMmappedBytes = partition->totalSizeOfSuperPages;
1395 partitionStats.totalCommittedBytes = partition->totalSizeOfCommittedPages;
1396 ASSERT(!partition->totalSizeOfDirectMappedPages);
1397 for (size_t i = 0; i < partitionNumBuckets; ++i) {
1398 if (memoryStats[i].isValid) {
1399 partitionStats.totalResidentBytes += memoryStats[i].residentBytes;
1400 partitionStats.totalActiveBytes += memoryStats[i].activeBytes;
1401 partitionStats.totalDecommittableBytes += memoryStats[i].decommittableBytes;
1402 partitionStats.totalDiscardableBytes += memoryStats[i].discardableBytes;
1403 if (!isLightDump)
1404 partitionStatsDumper->partitionsDumpBucketStats(partitionName, &memoryStats[i]);
1405 }
1406 }
1407 partitionStatsDumper->partitionDumpTotals(partitionName, &partitionStats);
1408 }
1409
1410 } // namespace WTF
1411