| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 81 matching lines...) |
| 92 double wasteRatio = (double) waste / (double) pageSize; | 92 double wasteRatio = (double) waste / (double) pageSize; |
| 93 if (wasteRatio < bestWasteRatio) { | 93 if (wasteRatio < bestWasteRatio) { |
| 94 bestWasteRatio = wasteRatio; | 94 bestWasteRatio = wasteRatio; |
| 95 bestPages = i; | 95 bestPages = i; |
| 96 } | 96 } |
| 97 } | 97 } |
| 98 ASSERT(bestPages > 0); | 98 ASSERT(bestPages > 0); |
| 99 return bestPages; | 99 return bestPages; |
| 100 } | 100 } |
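
The loop whose tail is visible above picks how many system pages a bucket's slot span should cover by minimizing the fraction of the span that is wasted after packing whole slots. A minimal standalone sketch of that search follows; kSystemPageSize, kMaxPagesPerSpan and the numbers in main() are illustrative assumptions, not the real tuning, and only the tail of the real loop appears in this hunk.

    #include <assert.h>
    #include <stddef.h>
    #include <stdio.h>

    // Illustrative constants; the real values come from PartitionAlloc's headers.
    static const size_t kSystemPageSize = 4096;
    static const size_t kMaxPagesPerSpan = 4;

    // Return the candidate page count (1..kMaxPagesPerSpan) whose slot span
    // wastes the smallest fraction of its bytes for the given slot size.
    static size_t bestSystemPagesPerSlotSpan(size_t slotSize)
    {
        double bestWasteRatio = 1.0;
        size_t bestPages = 0;
        for (size_t i = 1; i <= kMaxPagesPerSpan; ++i) {
            size_t pageSize = kSystemPageSize * i;
            size_t numSlots = pageSize / slotSize;
            size_t waste = pageSize - numSlots * slotSize;
            double wasteRatio = (double) waste / (double) pageSize;
            if (wasteRatio < bestWasteRatio) {
                bestWasteRatio = wasteRatio;
                bestPages = i;
            }
        }
        assert(bestPages > 0);
        return bestPages;
    }

    int main()
    {
        // A 928-byte slot fits 4 slots in one page, wasting 384/4096 bytes
        // (~9.4%); three pages fit 13 slots and waste only 224/12288 (~1.8%),
        // so the search settles on a 3-page span.
        printf("%zu\n", bestSystemPagesPerSlotSpan(928));
        return 0;
    }
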
| 101 | 101 |
| 102 static ALWAYS_INLINE void partitionPageMarkFree(PartitionPage* page) | |
| 103 { | |
| 104 ASSERT(!partitionPageIsFree(page)); | |
| 105 page->flags |= kPartitionPageFlagFree; | |
| 106 } | |
| 107 | |
| 108 static void parititonAllocBaseInit(PartitionRootBase* root) | 102 static void parititonAllocBaseInit(PartitionRootBase* root) |
| 109 { | 103 { |
| 110 ASSERT(!root->initialized); | 104 ASSERT(!root->initialized); |
| 111 | 105 |
| 112 spinLockLock(&PartitionRootBase::gInitializedLock); | 106 spinLockLock(&PartitionRootBase::gInitializedLock); |
| 113 if (!PartitionRootBase::gInitialized) { | 107 if (!PartitionRootBase::gInitialized) { |
| 114 PartitionRootBase::gInitialized = true; | 108 PartitionRootBase::gInitialized = true; |
| 115 // We mark the seed page as free to make sure it is skipped by our | 109 // We mark the seed page as free to make sure it is skipped by our |
| 116 // logic to find a new active page. | 110 // logic to find a new active page. |
| 117 partitionPageMarkFree(&PartitionRootBase::gSeedPage); | |
| 118 PartitionRootBase::gPagedBucket.activePagesHead = &PartitionRootGeneric:
:gSeedPage; | 111 PartitionRootBase::gPagedBucket.activePagesHead = &PartitionRootGeneric:
:gSeedPage; |
| 119 } | 112 } |
| 120 spinLockUnlock(&PartitionRootBase::gInitializedLock); | 113 spinLockUnlock(&PartitionRootBase::gInitializedLock); |
| 121 | 114 |
| 122 root->initialized = true; | 115 root->initialized = true; |
| 123 root->totalSizeOfSuperPages = 0; | 116 root->totalSizeOfSuperPages = 0; |
| 124 root->nextSuperPage = 0; | 117 root->nextSuperPage = 0; |
| 125 root->nextPartitionPage = 0; | 118 root->nextPartitionPage = 0; |
| 126 root->nextPartitionPageEnd = 0; | 119 root->nextPartitionPageEnd = 0; |
| 127 root->firstExtent = 0; | 120 root->firstExtent = 0; |
| (...skipping 108 matching lines...) |
| 236 // which tries to overflow to a non-existant order. | 229 // which tries to overflow to a non-existant order. |
| 237 *bucketPtr = &PartitionRootGeneric::gPagedBucket; | 230 *bucketPtr = &PartitionRootGeneric::gPagedBucket; |
| 238 } | 231 } |
| 239 | 232 |
| 240 static bool partitionAllocShutdownBucket(PartitionBucket* bucket) | 233 static bool partitionAllocShutdownBucket(PartitionBucket* bucket) |
| 241 { | 234 { |
| 242 // Failure here indicates a memory leak. | 235 // Failure here indicates a memory leak. |
| 243 bool noLeaks = !bucket->numFullPages; | 236 bool noLeaks = !bucket->numFullPages; |
| 244 PartitionPage* page = bucket->activePagesHead; | 237 PartitionPage* page = bucket->activePagesHead; |
| 245 while (page) { | 238 while (page) { |
| 246 if (!partitionPageIsFree(page) && page->numAllocatedSlots) | 239 if (page->numAllocatedSlots) |
| 247 noLeaks = false; | 240 noLeaks = false; |
| 248 page = page->activePageNext; | 241 page = page->nextPage; |
| 249 } | 242 } |
| 250 | 243 |
| 251 return noLeaks; | 244 return noLeaks; |
| 252 } | 245 } |
| 253 | 246 |
| 254 static void partitionAllocBaseShutdown(PartitionRootBase* root) | 247 static void partitionAllocBaseShutdown(PartitionRootBase* root) |
| 255 { | 248 { |
| 256 ASSERT(root->initialized); | 249 ASSERT(root->initialized); |
| 257 root->initialized = false; | 250 root->initialized = false; |
| 258 | 251 |
| (...skipping 127 matching lines...) |
| 386 } | 379 } |
| 387 | 380 |
| 388 static ALWAYS_INLINE size_t partitionBucketPartitionPages(const PartitionBucket*
bucket) | 381 static ALWAYS_INLINE size_t partitionBucketPartitionPages(const PartitionBucket*
bucket) |
| 389 { | 382 { |
| 390 return (bucket->numSystemPagesPerSlotSpan + (kNumSystemPagesPerPartitionPage
- 1)) / kNumSystemPagesPerPartitionPage; | 383 return (bucket->numSystemPagesPerSlotSpan + (kNumSystemPagesPerPartitionPage
- 1)) / kNumSystemPagesPerPartitionPage; |
| 391 } | 384 } |
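
partitionBucketPartitionPages() above is a plain integer ceiling division: round the slot span's system-page count up to whole partition pages. A tiny self-contained sketch, with an assumed value of 4 system pages per partition page (the real constant is kNumSystemPagesPerPartitionPage, whose value is not shown in this hunk):

    #include <assert.h>
    #include <stddef.h>

    // Assumed for illustration: a partition page spanning 4 system pages.
    static const size_t kNumSystemPagesPerPartitionPage = 4;

    // Adding (divisor - 1) before dividing is the usual integer ceiling trick.
    static size_t partitionPagesNeeded(size_t numSystemPagesPerSlotSpan)
    {
        return (numSystemPagesPerSlotSpan + (kNumSystemPagesPerPartitionPage - 1))
            / kNumSystemPagesPerPartitionPage;
    }

    int main()
    {
        assert(partitionPagesNeeded(1) == 1);
        assert(partitionPagesNeeded(4) == 1);
        assert(partitionPagesNeeded(5) == 2); // 5 system pages spill into a second partition page
        return 0;
    }
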
| 392 | 385 |
| 393 static ALWAYS_INLINE void partitionPageReset(PartitionPage* page, PartitionBucke
t* bucket) | 386 static ALWAYS_INLINE void partitionPageReset(PartitionPage* page, PartitionBucke
t* bucket) |
| 394 { | 387 { |
| 395 ASSERT(page != &PartitionRootGeneric::gSeedPage); | 388 ASSERT(page != &PartitionRootGeneric::gSeedPage); |
| 396 page->flags = 0; | |
| 397 page->numAllocatedSlots = 0; | 389 page->numAllocatedSlots = 0; |
| 398 page->numUnprovisionedSlots = partitionBucketSlots(bucket); | 390 page->numUnprovisionedSlots = partitionBucketSlots(bucket); |
| 399 ASSERT(page->numUnprovisionedSlots); | 391 ASSERT(page->numUnprovisionedSlots); |
| 400 page->bucket = bucket; | 392 page->bucket = bucket; |
| 393 page->nextPage = 0; |
| 401 // NULLing the freelist is not strictly necessary but it makes an ASSERT in
partitionPageFillFreelist simpler. | 394 // NULLing the freelist is not strictly necessary but it makes an ASSERT in
partitionPageFillFreelist simpler. |
| 402 partitionPageSetFreelistHead(page, 0); | 395 page->freelistHead = 0; |
| 403 page->pageOffset = 0; | 396 page->pageOffset = 0; |
| 404 size_t numPartitionPages = partitionBucketPartitionPages(bucket); | 397 size_t numPartitionPages = partitionBucketPartitionPages(bucket); |
| 405 size_t i; | 398 size_t i; |
| 406 char* pageCharPtr = reinterpret_cast<char*>(page); | 399 char* pageCharPtr = reinterpret_cast<char*>(page); |
| 407 for (i = 1; i < numPartitionPages; ++i) { | 400 for (i = 1; i < numPartitionPages; ++i) { |
| 408 pageCharPtr += kPageMetadataSize; | 401 pageCharPtr += kPageMetadataSize; |
| 409 PartitionPage* secondaryPage = reinterpret_cast<PartitionPage*>(pageChar
Ptr); | 402 PartitionPage* secondaryPage = reinterpret_cast<PartitionPage*>(pageChar
Ptr); |
| 410 secondaryPage->pageOffset = i; | 403 secondaryPage->pageOffset = i; |
| 411 } | 404 } |
| 412 } | 405 } |
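
partitionPageReset() above stamps a pageOffset into every secondary metadata entry of a multi-partition-page slot span, so any entry can be walked back to the head entry that holds the real bookkeeping (presumably what partitionPointerToPage relies on; that helper is not part of this hunk). A cut-down illustration; the struct, its field layout and kPageMetadataSize are stand-ins, not the real metadata:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    static const size_t kPageMetadataSize = 32; // assumed size, for illustration

    struct FakePageMetadata {
        uint8_t pageOffset;                  // 0 for the head entry, 1..N-1 for secondary entries
        char padding[kPageMetadataSize - 1]; // rest of the metadata slot
    };

    // Walk from any metadata entry of a span back to its head entry using the
    // offset the reset code stamped into each secondary entry.
    static FakePageMetadata* toHeadEntry(FakePageMetadata* entry)
    {
        char* p = reinterpret_cast<char*>(entry);
        p -= static_cast<size_t>(entry->pageOffset) * kPageMetadataSize;
        return reinterpret_cast<FakePageMetadata*>(p);
    }

    int main()
    {
        FakePageMetadata span[3];
        memset(span, 0, sizeof(span));
        span[1].pageOffset = 1;
        span[2].pageOffset = 2;
        assert(toHeadEntry(&span[0]) == &span[0]);
        assert(toHeadEntry(&span[2]) == &span[0]);
        return 0;
    }
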
| 413 | 406 |
| 414 static ALWAYS_INLINE char* partitionPageAllocAndFillFreelist(PartitionPage* page
) | 407 static ALWAYS_INLINE char* partitionPageAllocAndFillFreelist(PartitionPage* page
) |
| 415 { | 408 { |
| 416 ASSERT(page != &PartitionRootGeneric::gSeedPage); | 409 ASSERT(page != &PartitionRootGeneric::gSeedPage); |
| 417 size_t numSlots = page->numUnprovisionedSlots; | 410 size_t numSlots = page->numUnprovisionedSlots; |
| 418 ASSERT(numSlots); | 411 ASSERT(numSlots); |
| 419 PartitionBucket* bucket = page->bucket; | 412 PartitionBucket* bucket = page->bucket; |
| 420 // We should only get here when _every_ slot is either used or unprovisioned
. | 413 // We should only get here when _every_ slot is either used or unprovisioned
. |
| 421 // (The third state is "on the freelist". If we have a non-empty freelist, w
e should not get here.) | 414 // (The third state is "on the freelist". If we have a non-empty freelist, w
e should not get here.) |
| 422 ASSERT(numSlots + page->numAllocatedSlots == partitionBucketSlots(bucket)); | 415 ASSERT(numSlots + page->numAllocatedSlots == partitionBucketSlots(bucket)); |
| 423 // Similarly, make explicitly sure that the freelist is empty. | 416 // Similarly, make explicitly sure that the freelist is empty. |
| 424 ASSERT(!partitionPageFreelistHead(page)); | 417 ASSERT(!page->freelistHead); |
| 418 ASSERT(page->numAllocatedSlots >= 0); |
| 425 | 419 |
| 426 size_t size = bucket->slotSize; | 420 size_t size = bucket->slotSize; |
| 427 char* base = reinterpret_cast<char*>(partitionPageToPointer(page)); | 421 char* base = reinterpret_cast<char*>(partitionPageToPointer(page)); |
| 428 char* returnObject = base + (size * page->numAllocatedSlots); | 422 char* returnObject = base + (size * page->numAllocatedSlots); |
| 429 char* firstFreelistPointer = returnObject + size; | 423 char* firstFreelistPointer = returnObject + size; |
| 430 char* firstFreelistPointerExtent = firstFreelistPointer + sizeof(PartitionFr
eelistEntry*); | 424 char* firstFreelistPointerExtent = firstFreelistPointer + sizeof(PartitionFr
eelistEntry*); |
| 431 // Our goal is to fault as few system pages as possible. We calculate the | 425 // Our goal is to fault as few system pages as possible. We calculate the |
| 432 // page containing the "end" of the returned slot, and then allow freelist | 426 // page containing the "end" of the returned slot, and then allow freelist |
| 433 // pointers to be written up to the end of that page. | 427 // pointers to be written up to the end of that page. |
| 434 char* subPageLimit = reinterpret_cast<char*>((reinterpret_cast<uintptr_t>(fi
rstFreelistPointer) + kSystemPageOffsetMask) & kSystemPageBaseMask); | 428 char* subPageLimit = reinterpret_cast<char*>((reinterpret_cast<uintptr_t>(fi
rstFreelistPointer) + kSystemPageOffsetMask) & kSystemPageBaseMask); |
| (...skipping 16 matching lines...) |
| 451 // We always return an object slot -- that's the +1 below. | 445 // We always return an object slot -- that's the +1 below. |
| 452 // We do not neccessarily create any new freelist entries, because we cross
sub page boundaries frequently for large bucket sizes. | 446 // We do not neccessarily create any new freelist entries, because we cross
sub page boundaries frequently for large bucket sizes. |
| 453 ASSERT(numNewFreelistEntries + 1 <= numSlots); | 447 ASSERT(numNewFreelistEntries + 1 <= numSlots); |
| 454 numSlots -= (numNewFreelistEntries + 1); | 448 numSlots -= (numNewFreelistEntries + 1); |
| 455 page->numUnprovisionedSlots = numSlots; | 449 page->numUnprovisionedSlots = numSlots; |
| 456 page->numAllocatedSlots++; | 450 page->numAllocatedSlots++; |
| 457 | 451 |
| 458 if (LIKELY(numNewFreelistEntries)) { | 452 if (LIKELY(numNewFreelistEntries)) { |
| 459 char* freelistPointer = firstFreelistPointer; | 453 char* freelistPointer = firstFreelistPointer; |
| 460 PartitionFreelistEntry* entry = reinterpret_cast<PartitionFreelistEntry*
>(freelistPointer); | 454 PartitionFreelistEntry* entry = reinterpret_cast<PartitionFreelistEntry*
>(freelistPointer); |
| 461 partitionPageSetFreelistHead(page, entry); | 455 page->freelistHead = entry; |
| 462 while (--numNewFreelistEntries) { | 456 while (--numNewFreelistEntries) { |
| 463 freelistPointer += size; | 457 freelistPointer += size; |
| 464 PartitionFreelistEntry* nextEntry = reinterpret_cast<PartitionFreeli
stEntry*>(freelistPointer); | 458 PartitionFreelistEntry* nextEntry = reinterpret_cast<PartitionFreeli
stEntry*>(freelistPointer); |
| 465 entry->next = partitionFreelistMask(nextEntry); | 459 entry->next = partitionFreelistMask(nextEntry); |
| 466 entry = nextEntry; | 460 entry = nextEntry; |
| 467 } | 461 } |
| 468 entry->next = partitionFreelistMask(0); | 462 entry->next = partitionFreelistMask(0); |
| 469 } else { | 463 } else { |
| 470 partitionPageSetFreelistHead(page, 0); | 464 page->freelistHead = 0; |
| 471 } | 465 } |
| 472 return returnObject; | 466 return returnObject; |
| 473 } | 467 } |
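
partitionPageAllocAndFillFreelist() above hands out one slot and then pre-links new freelist entries only up to the next system-page boundary, so system pages that have not been touched yet are never faulted in just to hold freelist pointers (the counting loop itself sits in the lines skipped above). A simplified model of the boundary calculation; the constants and the helper name are assumptions for illustration:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    // Illustrative constants mirroring a 4KB system page.
    static const uintptr_t kSystemPageSize = 4096;
    static const uintptr_t kSystemPageOffsetMask = kSystemPageSize - 1;
    static const uintptr_t kSystemPageBaseMask = ~kSystemPageOffsetMask;

    // How many whole slots after the returned slot can have a freelist pointer
    // written into them without touching a system page beyond the one the
    // returned slot already faults in?
    static size_t freelistEntriesOnThisPage(uintptr_t returnSlot, size_t slotSize, size_t slotsLeft)
    {
        uintptr_t freelistPointer = returnSlot + slotSize;
        // Round up to the start of the next system page: writes may go up to here.
        uintptr_t subPageLimit = (freelistPointer + kSystemPageOffsetMask) & kSystemPageBaseMask;
        size_t numNewEntries = 0;
        while (freelistPointer + sizeof(void*) <= subPageLimit && numNewEntries < slotsLeft) {
            ++numNewEntries;
            freelistPointer += slotSize;
        }
        return numNewEntries;
    }

    int main()
    {
        // With 1KB slots and a span starting on a page boundary, handing out the
        // first slot lets us pre-link the remaining 3 slots of that same 4KB page,
        // but nothing on the following, still unfaulted, pages.
        assert(freelistEntriesOnThisPage(0x10000, 1024, 15) == 3);
        return 0;
    }
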
| 474 | 468 |
| 475 // This helper function scans the active page list for a suitable new active | 469 // This helper function scans the active page list for a suitable new active |
| 476 // page, starting at the current active page, and optionally skipping the | 470 // page, starting at the current active page, and optionally skipping the |
| 477 // current active page as a candidate. | 471 // current active page as a candidate. |
| 478 // When it finds a suitable new active page (one that has free slots), it is | 472 // When it finds a suitable new active page (one that has free slots), it is |
| 479 // set as the new active page and true is returned. If there is no suitable new | 473 // set as the new active page and true is returned. If there is no suitable new |
| 480 // active page, false is returned. | 474 // active page, false is returned. |
| 481 static ALWAYS_INLINE bool partitionSetNewActivePage(PartitionBucket* bucket, boo
l currentPageOk) | 475 static ALWAYS_INLINE bool partitionSetNewActivePage(PartitionBucket* bucket, boo
l currentPageOk) |
| 482 { | 476 { |
| 483 PartitionPage* page = bucket->activePagesHead; | 477 PartitionPage* page = bucket->activePagesHead; |
| 478 if (page == &PartitionRootBase::gSeedPage) { |
| 479 ASSERT(!page->nextPage); |
| 480 return false; |
| 481 } |
| 482 |
| 484 if (!currentPageOk) | 483 if (!currentPageOk) |
| 485 page = page->activePageNext; | 484 page = page->nextPage; |
| 486 | 485 |
| 487 for (; page; page = page->activePageNext) { | 486 PartitionPage* nextPage = 0; |
| 487 for (; page; page = nextPage) { |
| 488 nextPage = page->nextPage; |
| 488 ASSERT(page->bucket == bucket->activePagesHead->bucket); | 489 ASSERT(page->bucket == bucket->activePagesHead->bucket); |
| 489 | 490 ASSERT(page != bucket->freePagesHead); |
| 490 // Skip over free pages. We need this check first so that we can trust | 491 ASSERT(!bucket->freePagesHead || page != bucket->freePagesHead->nextPage
); |
| 491 // that the page union field really containts a freelist pointer. | |
| 492 if (UNLIKELY(partitionPageIsFree(page))) | |
| 493 continue; | |
| 494 | 492 |
| 495 // Page is usable if it has something on the freelist, or unprovisioned | 493 // Page is usable if it has something on the freelist, or unprovisioned |
| 496 // slots that can be turned into a freelist. | 494 // slots that can be turned into a freelist. |
| 497 if (LIKELY(partitionPageFreelistHead(page) != 0) || LIKELY(page->numUnpr
ovisionedSlots)) { | 495 if (LIKELY(page->freelistHead != 0) || LIKELY(page->numUnprovisionedSlot
s)) { |
| 498 bucket->activePagesHead = page; | 496 bucket->activePagesHead = page; |
| 499 return true; | 497 return true; |
| 500 } | 498 } |
| 501 // If we get here, we found a full page. Skip over it too, and also tag | 499 |
| 502 // it as full (via a negative value). We need it tagged so that free'ing | 500 ASSERT(page->numAllocatedSlots >= 0); |
| 503 // can tell, and move it back into the active page list. | 501 if (LIKELY(page->numAllocatedSlots == 0)) { |
| 504 ASSERT(page->numAllocatedSlots == static_cast<int>(partitionBucketSlots(
bucket))); | 502 // We hit a free page, so shepherd it on to the free page list. |
| 505 page->numAllocatedSlots = -page->numAllocatedSlots; | 503 page->nextPage = bucket->freePagesHead; |
| 506 ++bucket->numFullPages; | 504 bucket->freePagesHead = page; |
| 505 } else { |
| 506 // If we get here, we found a full page. Skip over it too, and also |
| 507 // tag it as full (via a negative value). We need it tagged so that |
| 508 // free'ing can tell, and move it back into the active page list. |
| 509 ASSERT(page->numAllocatedSlots == static_cast<int>(partitionBucketSl
ots(bucket))); |
| 510 page->numAllocatedSlots = -page->numAllocatedSlots; |
| 511 ++bucket->numFullPages; |
| 512 // Not necessary but might help stop accidents. |
| 513 page->nextPage = 0; |
| 514 } |
| 507 } | 515 } |
| 508 | 516 |
| 509 bucket->activePagesHead->activePageNext = 0; | 517 bucket->activePagesHead->nextPage = 0; |
| 510 return false; | 518 return false; |
| 511 } | 519 } |
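
The rewritten scan above keeps a single singly-linked active list per bucket: it stops at the first page that still has a freelist or unprovisioned slots, shepherds fully empty pages onto the free page list, and tags full pages with a negated slot count before dropping them from the list. A toy sketch of that walk with simplified structures (it omits the currentPageOk flag and the seed-page check, and the field names are stand-ins):

    #include <assert.h>
    #include <stddef.h>

    struct ToyPage {
        ToyPage* nextPage;
        int numAllocatedSlots; // negated once the page is detected as full
        int numFreeSlots;      // stands in for freelistHead / numUnprovisionedSlots
    };

    struct ToyBucket {
        ToyPage* activePagesHead;
        ToyPage* freePagesHead;
        int numFullPages;
        int slotsPerPage;
    };

    // Walk the active list: stop at the first usable page, move empty pages to
    // the free list, tag and drop full pages.
    static bool setNewActivePage(ToyBucket* bucket)
    {
        ToyPage* nextPage = 0;
        for (ToyPage* page = bucket->activePagesHead; page; page = nextPage) {
            nextPage = page->nextPage;
            if (page->numFreeSlots) {
                bucket->activePagesHead = page; // usable: make it the current page
                return true;
            }
            if (!page->numAllocatedSlots) {
                page->nextPage = bucket->freePagesHead; // empty: shepherd to the free list
                bucket->freePagesHead = page;
            } else {
                assert(page->numAllocatedSlots == bucket->slotsPerPage);
                page->numAllocatedSlots = -page->numAllocatedSlots; // full: tag and drop
                ++bucket->numFullPages;
                page->nextPage = 0;
            }
        }
        return false;
    }

    int main()
    {
        ToyPage full = { 0, 4, 0 };
        ToyPage partial = { &full, 2, 2 };
        ToyBucket bucket = { &partial, 0, 0, 4 };
        assert(setNewActivePage(&bucket)); // head already usable
        partial.numFreeSlots = 0;
        partial.numAllocatedSlots = 0;     // head is now completely empty
        assert(!setNewActivePage(&bucket));
        assert(bucket.freePagesHead == &partial); // empty page was shepherded
        assert(full.numAllocatedSlots == -4 && bucket.numFullPages == 1); // full page was tagged
        return 0;
    }
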
| 512 | 520 |
| 513 static ALWAYS_INLINE void partitionPageSetFreePageNext(PartitionPage* page, Part
itionPage* nextFree) | |
| 514 { | |
| 515 ASSERT(partitionPageIsFree(page)); | |
| 516 page->u.freePageNext = nextFree; | |
| 517 } | |
| 518 | |
| 519 static ALWAYS_INLINE PartitionPage* partitionPageFreePageNext(PartitionPage* pag
e) | |
| 520 { | |
| 521 ASSERT(partitionPageIsFree(page)); | |
| 522 return page->u.freePageNext; | |
| 523 } | |
| 524 | |
| 525 static ALWAYS_INLINE bool partitionBucketIsPaged(PartitionBucket* bucket) | 521 static ALWAYS_INLINE bool partitionBucketIsPaged(PartitionBucket* bucket) |
| 526 { | 522 { |
| 527 return !bucket->slotSize; | 523 return !bucket->slotSize; |
| 528 } | 524 } |
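
Routing oversized requests through the sentinel "paged" bucket (slotSize == 0) is what keeps the hot path branch-free: the sentinel can never satisfy the normal freelist checks, so such requests always fall into the slow path, which detects them with partitionBucketIsPaged() and, in the hunk below, hands them to fastMalloc(). A toy model of that routing; the bucket table and the size cutoff here are invented for illustration:

    #include <assert.h>
    #include <limits.h>
    #include <stddef.h>
    #include <stdlib.h>

    struct ToyBucket {
        size_t slotSize; // 0 marks the sentinel "paged" bucket
    };

    static ToyBucket gPagedBucket = { 0 };
    static ToyBucket gSmallBucket = { 16 };
    static const size_t kMaxBucketed = 1024; // illustrative, not the real kGenericMaxBucketed

    static ToyBucket* bucketForSize(size_t size)
    {
        // The real code indexes a bucket table by size; out-of-range sizes get
        // pointed at the sentinel instead of overflowing the table.
        return size <= kMaxBucketed ? &gSmallBucket : &gPagedBucket;
    }

    static void* slowPathAlloc(ToyBucket* bucket, size_t size)
    {
        if (!bucket->slotSize) { // the partitionBucketIsPaged() case
            assert(size <= INT_MAX);
            return malloc(size); // stands in for the fastMalloc() fallback
        }
        return 0; // a normal bucketed allocation would happen here
    }

    int main()
    {
        size_t big = 1u << 20;
        void* p = slowPathAlloc(bucketForSize(big), big);
        assert(p);
        free(p);
        return 0;
    }
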
| 529 | 525 |
| 530 void* partitionAllocSlowPath(PartitionRootBase* root, size_t size, PartitionBuck
et* bucket) | 526 void* partitionAllocSlowPath(PartitionRootBase* root, size_t size, PartitionBuck
et* bucket) |
| 531 { | 527 { |
| 532 // The slow path is called when the freelist is empty. | 528 // The slow path is called when the freelist is empty. |
| 533 ASSERT(!partitionPageFreelistHead(bucket->activePagesHead)); | 529 ASSERT(!bucket->activePagesHead->freelistHead); |
| 534 | 530 |
| 535 // For the partitionAllocGeneric API, we have a bunch of buckets marked | 531 // For the partitionAllocGeneric API, we have a bunch of buckets marked |
| 536 // as special cases. We bounce them through to the slow path so that we | 532 // as special cases. We bounce them through to the slow path so that we |
| 537 // can still have a blazing fast hot path due to lack of corner-case | 533 // can still have a blazing fast hot path due to lack of corner-case |
| 538 // branches. | 534 // branches. |
| 539 if (UNLIKELY(partitionBucketIsPaged(bucket))) { | 535 if (UNLIKELY(partitionBucketIsPaged(bucket))) { |
| 540 RELEASE_ASSERT(size <= INT_MAX); | 536 RELEASE_ASSERT(size <= INT_MAX); |
| 541 ASSERT(size > kGenericMaxBucketed); | 537 ASSERT(size > kGenericMaxBucketed); |
| 542 // This will be going away real soon now. | 538 // This will be going away real soon now. |
| 543 return fastMalloc(size); | 539 return fastMalloc(size); |
| 544 } | 540 } |
| 545 | 541 |
| 546 // First, look for a usable page in the existing active pages list. | 542 // First, look for a usable page in the existing active pages list. |
| 547 // Change active page, accepting the current page as a candidate. | 543 // Change active page, accepting the current page as a candidate. |
| 548 if (LIKELY(partitionSetNewActivePage(bucket, true))) { | 544 if (LIKELY(partitionSetNewActivePage(bucket, true))) { |
| 549 PartitionPage* newPage = bucket->activePagesHead; | 545 PartitionPage* newPage = bucket->activePagesHead; |
| 550 if (LIKELY(partitionPageFreelistHead(newPage) != 0)) { | 546 if (LIKELY(newPage->freelistHead != 0)) { |
| 551 PartitionFreelistEntry* ret = partitionPageFreelistHead(newPage); | 547 PartitionFreelistEntry* ret = newPage->freelistHead; |
| 552 partitionPageSetFreelistHead(newPage, partitionFreelistMask(ret->nex
t)); | 548 newPage->freelistHead = partitionFreelistMask(ret->next); |
| 553 newPage->numAllocatedSlots++; | 549 newPage->numAllocatedSlots++; |
| 554 return ret; | 550 return ret; |
| 555 } | 551 } |
| 556 ASSERT(newPage->numUnprovisionedSlots); | 552 ASSERT(newPage->numUnprovisionedSlots); |
| 557 return partitionPageAllocAndFillFreelist(newPage); | 553 return partitionPageAllocAndFillFreelist(newPage); |
| 558 } | 554 } |
| 559 | 555 |
| 560 // Second, look in our list of freed but reserved pages. | 556 // Second, look in our list of freed but reserved pages. |
| 561 PartitionPage* newPage = bucket->freePagesHead; | 557 PartitionPage* newPage = bucket->freePagesHead; |
| 562 if (LIKELY(newPage != 0)) { | 558 if (LIKELY(newPage != 0)) { |
| 563 ASSERT(newPage != &PartitionRootGeneric::gSeedPage); | 559 ASSERT(newPage != &PartitionRootGeneric::gSeedPage); |
| 564 bucket->freePagesHead = partitionPageFreePageNext(newPage); | 560 ASSERT(!newPage->freelistHead); |
| 561 ASSERT(!newPage->numAllocatedSlots); |
| 562 ASSERT(!newPage->numUnprovisionedSlots); |
| 563 bucket->freePagesHead = newPage->nextPage; |
| 565 } else { | 564 } else { |
| 566 // Third. If we get here, we need a brand new page. | 565 // Third. If we get here, we need a brand new page. |
| 567 size_t numPartitionPages = partitionBucketPartitionPages(bucket); | 566 size_t numPartitionPages = partitionBucketPartitionPages(bucket); |
| 568 void* rawNewPage = partitionAllocPartitionPages(root, numPartitionPages)
; | 567 void* rawNewPage = partitionAllocPartitionPages(root, numPartitionPages)
; |
| 569 // Skip the alignment check because it depends on page->bucket, which is
not yet set. | 568 // Skip the alignment check because it depends on page->bucket, which is
not yet set. |
| 570 newPage = partitionPointerToPageNoAlignmentCheck(rawNewPage); | 569 newPage = partitionPointerToPageNoAlignmentCheck(rawNewPage); |
| 571 } | 570 } |
| 572 | 571 |
| 573 newPage->activePageNext = 0; | |
| 574 partitionPageReset(newPage, bucket); | 572 partitionPageReset(newPage, bucket); |
| 575 bucket->activePagesHead = newPage; | 573 bucket->activePagesHead = newPage; |
| 576 return partitionPageAllocAndFillFreelist(newPage); | 574 return partitionPageAllocAndFillFreelist(newPage); |
| 577 } | 575 } |
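
One detail in the slow path's freelist pop above: the stored next pointers go through partitionFreelistMask(), an invertible transform whose definition is not part of this hunk. The sketch below uses a plain bitwise NOT purely to show the push/pop pattern (store links masked, unmask on pop); it is not the real mask:

    #include <assert.h>
    #include <stdint.h>

    // Toy freelist entry; the real PartitionFreelistEntry lives inside the free
    // slot itself.
    struct Entry {
        Entry* next;
    };

    // Stand-in for partitionFreelistMask(); NOT is chosen only because it is its
    // own inverse, which is the property the pop below relies on.
    static Entry* mask(Entry* ptr)
    {
        return reinterpret_cast<Entry*>(~reinterpret_cast<uintptr_t>(ptr));
    }

    int main()
    {
        Entry slots[3];
        // Build head -> slots[1] -> slots[2] -> end, storing each link masked.
        Entry* freelistHead = &slots[0];
        slots[0].next = mask(&slots[1]);
        slots[1].next = mask(&slots[2]);
        slots[2].next = mask(0);

        // Pop, mirroring the slow path: take the head, unmask its next pointer,
        // and the caller would then bump numAllocatedSlots.
        Entry* ret = freelistHead;
        freelistHead = mask(ret->next);
        assert(ret == &slots[0] && freelistHead == &slots[1]);
        return 0;
    }
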
| 578 | 576 |
| 579 static ALWAYS_INLINE void partitionPutPageOnFreelist(PartitionPage* page, Partit
ionBucket* bucket) | 577 static ALWAYS_INLINE void partitionFreePage(PartitionPage* page) |
| 580 { | 578 { |
| 579 ASSERT(page->freelistHead); |
| 580 ASSERT(!page->numAllocatedSlots); |
| 581 partitionUnusePage(page); | 581 partitionUnusePage(page); |
| 582 // We actually leave the freed page in the active list as well as | 582 // We actually leave the freed page in the active list. We'll sweep it on |
| 583 // putting it on the free list. We'll evict it from the active list soon | 583 // to the free page list when we next walk the active page list. Pulling |
| 584 // enough in partitionAllocSlowPath. Pulling this trick enables us to | 584 // this trick enables us to use a singly-linked page list for all cases, |
| 585 // use a singly-linked page list for all cases, which is critical in | 585 // which is critical in keeping the page metadata structure down to 32 |
| 586 // keeping the page metadata structure down to 32-bytes in size. | 586 // bytes in size. |
| 587 partitionPageMarkFree(page); | 587 page->freelistHead = 0; |
| 588 partitionPageSetFreePageNext(page, bucket->freePagesHead); | 588 page->numUnprovisionedSlots = 0; |
| 589 bucket->freePagesHead = page; | |
| 590 } | 589 } |
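
partitionUnusePage() itself is not in this hunk; the idea is to hand the span's system pages back to the OS while keeping the virtual range reserved, so the emptied page can sit on the free page list and be recycled cheaply later. One plausible POSIX-level sketch of that decommit step, assuming Linux madvise() semantics (the real code goes through its own page allocator wrappers, so treat this only as an illustration of the mechanism):

    #include <assert.h>
    #include <stddef.h>
    #include <sys/mman.h>

    // MADV_DONTNEED on an anonymous private mapping (Linux semantics) lets the
    // kernel reclaim the physical pages; later touches refault zero-filled pages.
    static void decommitSpan(void* addr, size_t length)
    {
        int ret = madvise(addr, length, MADV_DONTNEED);
        assert(!ret);
        (void) ret;
    }

    int main()
    {
        const size_t kSpan = 4096 * 4;
        void* span = mmap(0, kSpan, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        assert(span != MAP_FAILED);
        static_cast<char*>(span)[0] = 42; // fault one page in
        decommitSpan(span, kSpan);        // physical memory can now be reclaimed
        // The address range stays valid: touching it again simply refaults zeroed
        // pages, which is what reusing a page off the free list relies on.
        assert(static_cast<char*>(span)[0] == 0);
        munmap(span, kSpan);
        return 0;
    }
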
| 591 | 590 |
| 592 void partitionFreeSlowPath(PartitionPage* page) | 591 void partitionFreeSlowPath(PartitionPage* page) |
| 593 { | 592 { |
| 594 PartitionBucket* bucket = page->bucket; | 593 PartitionBucket* bucket = page->bucket; |
| 595 ASSERT(page != &PartitionRootGeneric::gSeedPage); | 594 ASSERT(page != &PartitionRootGeneric::gSeedPage); |
| 596 ASSERT(bucket->activePagesHead != &PartitionRootGeneric::gSeedPage); | 595 ASSERT(bucket->activePagesHead != &PartitionRootGeneric::gSeedPage); |
| 597 if (LIKELY(page->numAllocatedSlots == 0)) { | 596 if (LIKELY(page->numAllocatedSlots == 0)) { |
| 598 // Page became fully unused. | 597 // Page became fully unused. |
| 599 // If it's the current page, change it! | 598 // If it's the current page, change it! |
| 600 if (LIKELY(page == bucket->activePagesHead)) { | 599 if (LIKELY(page == bucket->activePagesHead)) { |
| 601 // Change active page, rejecting the current page as a candidate. | 600 // Change active page, rejecting the current page as a candidate. |
| 602 if (!partitionSetNewActivePage(bucket, false)) { | 601 if (!partitionSetNewActivePage(bucket, false)) { |
| 603 // We do not free the last active page in a bucket. | 602 // We do not free the last active page in a bucket. |
| 604 // To do so is a serious performance issue as we can get into | 603 // To do so is a serious performance issue as we can get into |
| 605 // a repeating state change between 0 and 1 objects of a given | 604 // a repeating state change between 0 and 1 objects of a given |
| 606 // size allocated; and each state change incurs either a system | 605 // size allocated; and each state change incurs either a system |
| 607 // call or a page fault, or more. | 606 // call or a page fault, or more. |
| 608 ASSERT(!page->activePageNext); | 607 ASSERT(!page->nextPage); |
| 609 return; | 608 return; |
| 610 } | 609 } |
| 610 // Link the to-be-freed page back in after the new current page, to |
| 611 // avoid losing a reference to it. |
| 612 PartitionPage* currentPage = bucket->activePagesHead; |
| 613 page->nextPage = currentPage->nextPage; |
| 614 currentPage->nextPage = page; |
| 611 } | 615 } |
| 612 partitionPutPageOnFreelist(page, bucket); | 616 partitionFreePage(page); |
| 613 } else { | 617 } else { |
| 614 // Ensure that the page is full. That's the only valid case if we | 618 // Ensure that the page is full. That's the only valid case if we |
| 615 // arrive here. | 619 // arrive here. |
| 616 ASSERT(page->numAllocatedSlots < 0); | 620 ASSERT(page->numAllocatedSlots < 0); |
| 617 page->numAllocatedSlots = -page->numAllocatedSlots - 2; | 621 page->numAllocatedSlots = -page->numAllocatedSlots - 2; |
| 618 ASSERT(page->numAllocatedSlots == static_cast<int>(partitionBucketSlots(
bucket) - 1)); | 622 ASSERT(page->numAllocatedSlots == static_cast<int>(partitionBucketSlots(
bucket) - 1)); |
| 619 // Fully used page became partially used. It must be put back on the | 623 // Fully used page became partially used. It must be put back on the |
| 620 // non-full page list. Also make it the current page to increase the | 624 // non-full page list. Also make it the current page to increase the |
| 621 // chances of it being filled up again. The old current page will be | 625 // chances of it being filled up again. The old current page will be |
| 622 // the next page. | 626 // the next page. |
| 623 page->activePageNext = bucket->activePagesHead; | 627 page->nextPage = bucket->activePagesHead; |
| 624 bucket->activePagesHead = page; | 628 bucket->activePagesHead = page; |
| 625 --bucket->numFullPages; | 629 --bucket->numFullPages; |
| 626 // Special case: for a partition page with just a single slot, it may | 630 // Special case: for a partition page with just a single slot, it may |
| 627 // now be empty and we want to run it through the empty logic. | 631 // now be empty and we want to run it through the empty logic. |
| 628 if (UNLIKELY(page->numAllocatedSlots == 0)) | 632 if (UNLIKELY(page->numAllocatedSlots == 0)) |
| 629 partitionFreeSlowPath(page); | 633 partitionFreeSlowPath(page); |
| 630 // Note: there's an opportunity here to look to see if the old active | 634 // Note: there's an opportunity here to look to see if the old active |
| 631 // page is now freeable. | 635 // page is now freeable. |
| 632 } | 636 } |
| 633 } | 637 } |
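
The "numAllocatedSlots = -numAllocatedSlots - 2" line reads oddly in isolation. It works because a full page was tagged as -(slot count) by the active-page scan, and the fast-path free is assumed to have already decremented that (negative) count by one before calling into the slow path; undoing both in one step leaves exactly "full minus the slot just freed", which is what the ASSERT right after it checks. A worked check of the arithmetic (the fast-path decrement is an assumption, since that code is not part of this hunk):

    #include <assert.h>

    int main()
    {
        const int slotsPerPage = 8;

        // A full page gets tagged with the negated slot count by the scan.
        int numAllocatedSlots = -slotsPerPage; // -8

        // Assumed fast-path behaviour: free() decrements the count before it
        // notices the page needs slow-path handling.
        --numAllocatedSlots; // -9

        // Slow path: undo the tag and the decrement in one step.
        numAllocatedSlots = -numAllocatedSlots - 2; // 9 - 2 = 7

        assert(numAllocatedSlots == slotsPerPage - 1); // "full minus the one just freed"
        return 0;
    }
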
| (...skipping 90 matching lines...) |
| 724 for (i = 0; i < root.numBuckets; ++i) { | 728 for (i = 0; i < root.numBuckets; ++i) { |
| 725 const PartitionBucket& bucket = root.buckets()[i]; | 729 const PartitionBucket& bucket = root.buckets()[i]; |
| 726 if (bucket.activePagesHead == &PartitionRootGeneric::gSeedPage && !bucke
t.freePagesHead && !bucket.numFullPages) { | 730 if (bucket.activePagesHead == &PartitionRootGeneric::gSeedPage && !bucke
t.freePagesHead && !bucket.numFullPages) { |
| 727 // Empty bucket with no freelist or full pages. Skip reporting it. | 731 // Empty bucket with no freelist or full pages. Skip reporting it. |
| 728 continue; | 732 continue; |
| 729 } | 733 } |
| 730 size_t numFreePages = 0; | 734 size_t numFreePages = 0; |
| 731 PartitionPage* freePages = bucket.freePagesHead; | 735 PartitionPage* freePages = bucket.freePagesHead; |
| 732 while (freePages) { | 736 while (freePages) { |
| 733 ++numFreePages; | 737 ++numFreePages; |
| 734 freePages = partitionPageFreePageNext(freePages); | 738 freePages = freePages->nextPage; |
| 735 } | 739 } |
| 736 size_t bucketSlotSize = bucket.slotSize; | 740 size_t bucketSlotSize = bucket.slotSize; |
| 737 size_t bucketNumSlots = partitionBucketSlots(&bucket); | 741 size_t bucketNumSlots = partitionBucketSlots(&bucket); |
| 738 size_t bucketUsefulStorage = bucketSlotSize * bucketNumSlots; | 742 size_t bucketUsefulStorage = bucketSlotSize * bucketNumSlots; |
| 739 size_t bucketPageSize = bucket.numSystemPagesPerSlotSpan * kSystemPageSi
ze; | 743 size_t bucketPageSize = bucket.numSystemPagesPerSlotSpan * kSystemPageSi
ze; |
| 740 size_t bucketWaste = bucketPageSize - bucketUsefulStorage; | 744 size_t bucketWaste = bucketPageSize - bucketUsefulStorage; |
| 741 size_t numActiveBytes = bucket.numFullPages * bucketUsefulStorage; | 745 size_t numActiveBytes = bucket.numFullPages * bucketUsefulStorage; |
| 742 size_t numResidentBytes = bucket.numFullPages * bucketPageSize; | 746 size_t numResidentBytes = bucket.numFullPages * bucketPageSize; |
| 743 size_t numFreeableBytes = 0; | 747 size_t numFreeableBytes = 0; |
| 744 size_t numActivePages = 0; | 748 size_t numActivePages = 0; |
| 745 const PartitionPage* page = bucket.activePagesHead; | 749 const PartitionPage* page = bucket.activePagesHead; |
| 746 do { | 750 do { |
| 747 if (page != &PartitionRootGeneric::gSeedPage) { | 751 if (page != &PartitionRootGeneric::gSeedPage) { |
| 748 ++numActivePages; | 752 ++numActivePages; |
| 749 numActiveBytes += (page->numAllocatedSlots * bucketSlotSize); | 753 numActiveBytes += (page->numAllocatedSlots * bucketSlotSize); |
| 750 size_t pageBytesResident = (bucketNumSlots - page->numUnprovisio
nedSlots) * bucketSlotSize; | 754 size_t pageBytesResident = (bucketNumSlots - page->numUnprovisio
nedSlots) * bucketSlotSize; |
| 751 // Round up to system page size. | 755 // Round up to system page size. |
| 752 pageBytesResident = (pageBytesResident + kSystemPageOffsetMask)
& kSystemPageBaseMask; | 756 pageBytesResident = (pageBytesResident + kSystemPageOffsetMask)
& kSystemPageBaseMask; |
| 753 numResidentBytes += pageBytesResident; | 757 numResidentBytes += pageBytesResident; |
| 754 if (!page->numAllocatedSlots) | 758 if (!page->numAllocatedSlots) |
| 755 numFreeableBytes += pageBytesResident; | 759 numFreeableBytes += pageBytesResident; |
| 756 } | 760 } |
| 757 page = page->activePageNext; | 761 page = page->nextPage; |
| 758 } while (page != bucket.activePagesHead); | 762 } while (page != bucket.activePagesHead); |
| 759 totalLive += numActiveBytes; | 763 totalLive += numActiveBytes; |
| 760 totalResident += numResidentBytes; | 764 totalResident += numResidentBytes; |
| 761 totalFreeable += numFreeableBytes; | 765 totalFreeable += numFreeableBytes; |
| 762 printf("bucket size %zu (pageSize %zu waste %zu): %zu alloc/%zu commit/%
zu freeable bytes, %zu/%zu/%zu full/active/free pages\n", bucketSlotSize, bucket
PageSize, bucketWaste, numActiveBytes, numResidentBytes, numFreeableBytes, stati
c_cast<size_t>(bucket.numFullPages), numActivePages, numFreePages); | 766 printf("bucket size %zu (pageSize %zu waste %zu): %zu alloc/%zu commit/%
zu freeable bytes, %zu/%zu/%zu full/active/free pages\n", bucketSlotSize, bucket
PageSize, bucketWaste, numActiveBytes, numResidentBytes, numFreeableBytes, stati
c_cast<size_t>(bucket.numFullPages), numActivePages, numFreePages); |
| 763 } | 767 } |
| 764 printf("total live: %zu bytes\n", totalLive); | 768 printf("total live: %zu bytes\n", totalLive); |
| 765 printf("total resident: %zu bytes\n", totalResident); | 769 printf("total resident: %zu bytes\n", totalResident); |
| 766 printf("total freeable: %zu bytes\n", totalFreeable); | 770 printf("total freeable: %zu bytes\n", totalFreeable); |
| 767 fflush(stdout); | 771 fflush(stdout); |
| 768 } | 772 } |
| 769 | 773 |
| 770 #endif // !NDEBUG | 774 #endif // !NDEBUG |
| 771 | 775 |
| 772 } // namespace WTF | 776 } // namespace WTF |
| 773 | 777 |
| OLD | NEW |