| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 94 matching lines...) |
| 105 spinLockLock(&PartitionRootBase::gInitializedLock); | 105 spinLockLock(&PartitionRootBase::gInitializedLock); |
| 106 if (!PartitionRootBase::gInitialized) { | 106 if (!PartitionRootBase::gInitialized) { |
| 107 PartitionRootBase::gInitialized = true; | 107 PartitionRootBase::gInitialized = true; |
| 108 // We mark the seed page as free to make sure it is skipped by our | 108 // We mark the seed page as free to make sure it is skipped by our |
| 109 // logic to find a new active page. | 109 // logic to find a new active page. |
| 110 PartitionRootBase::gPagedBucket.activePagesHead = &PartitionRootGeneric::gSeedPage; | 110 PartitionRootBase::gPagedBucket.activePagesHead = &PartitionRootGeneric::gSeedPage; |
| 111 } | 111 } |
| 112 spinLockUnlock(&PartitionRootBase::gInitializedLock); | 112 spinLockUnlock(&PartitionRootBase::gInitializedLock); |
| 113 | 113 |
| 114 root->initialized = true; | 114 root->initialized = true; |
| 115 root->totalSizeOfCommittedPages = 0; |
| 115 root->totalSizeOfSuperPages = 0; | 116 root->totalSizeOfSuperPages = 0; |
| 116 root->nextSuperPage = 0; | 117 root->nextSuperPage = 0; |
| 117 root->nextPartitionPage = 0; | 118 root->nextPartitionPage = 0; |
| 118 root->nextPartitionPageEnd = 0; | 119 root->nextPartitionPageEnd = 0; |
| 119 root->firstExtent = 0; | 120 root->firstExtent = 0; |
| 120 root->currentExtent = 0; | 121 root->currentExtent = 0; |
| 121 | 122 |
| 122 memset(&root->globalEmptyPageRing, '\0', sizeof(root->globalEmptyPageRing)); | 123 memset(&root->globalEmptyPageRing, '\0', sizeof(root->globalEmptyPageRing)); |
| 123 root->globalEmptyPageRingIndex = 0; | 124 root->globalEmptyPageRingIndex = 0; |
| 124 | 125 |
| (...skipping 176 matching lines...) |
| 301 static NEVER_INLINE void partitionOutOfMemory() | 302 static NEVER_INLINE void partitionOutOfMemory() |
| 302 { | 303 { |
| 303 IMMEDIATE_CRASH(); | 304 IMMEDIATE_CRASH(); |
| 304 } | 305 } |
| 305 | 306 |
| 306 static NEVER_INLINE void partitionFull() | 307 static NEVER_INLINE void partitionFull() |
| 307 { | 308 { |
| 308 IMMEDIATE_CRASH(); | 309 IMMEDIATE_CRASH(); |
| 309 } | 310 } |
| 310 | 311 |
| 312 static ALWAYS_INLINE void partitionDecommitSystemPages(PartitionRootBase* root, void* addr, size_t len) |
| 313 { |
| 314 decommitSystemPages(addr, len); |
| 315 ASSERT(root->totalSizeOfCommittedPages > len); |
| 316 root->totalSizeOfCommittedPages -= len; |
| 317 } |
| 318 |
| 319 static ALWAYS_INLINE void partitionRecommitSystemPages(PartitionRootBase* root, void* addr, size_t len) |
| 320 { |
| 321 recommitSystemPages(addr, len); |
| 322 root->totalSizeOfCommittedPages += len; |
| 323 } |
| 324 |
| 311 static ALWAYS_INLINE void* partitionAllocPartitionPages(PartitionRootBase* root, int flags, size_t numPartitionPages) | 325 static ALWAYS_INLINE void* partitionAllocPartitionPages(PartitionRootBase* root, int flags, size_t numPartitionPages) |
| 312 { | 326 { |
| 313 ASSERT(!(reinterpret_cast<uintptr_t>(root->nextPartitionPage) % kPartitionPageSize)); | 327 ASSERT(!(reinterpret_cast<uintptr_t>(root->nextPartitionPage) % kPartitionPageSize)); |
| 314 ASSERT(!(reinterpret_cast<uintptr_t>(root->nextPartitionPageEnd) % kPartitionPageSize)); | 328 ASSERT(!(reinterpret_cast<uintptr_t>(root->nextPartitionPageEnd) % kPartitionPageSize)); |
| 315 RELEASE_ASSERT(numPartitionPages <= kNumPartitionPagesPerSuperPage); | 329 RELEASE_ASSERT(numPartitionPages <= kNumPartitionPagesPerSuperPage); |
| 316 size_t totalSize = kPartitionPageSize * numPartitionPages; | 330 size_t totalSize = kPartitionPageSize * numPartitionPages; |
| 331 root->totalSizeOfCommittedPages += totalSize; |
| 317 size_t numPartitionPagesLeft = (root->nextPartitionPageEnd - root->nextPartitionPage) >> kPartitionPageShift; | 332 size_t numPartitionPagesLeft = (root->nextPartitionPageEnd - root->nextPartitionPage) >> kPartitionPageShift; |
| 318 if (LIKELY(numPartitionPagesLeft >= numPartitionPages)) { | 333 if (LIKELY(numPartitionPagesLeft >= numPartitionPages)) { |
| 319 // In this case, we can still hand out pages from the current super page | 334 // In this case, we can still hand out pages from the current super page |
| 320 // allocation. | 335 // allocation. |
| 321 char* ret = root->nextPartitionPage; | 336 char* ret = root->nextPartitionPage; |
| 322 root->nextPartitionPage += totalSize; | 337 root->nextPartitionPage += totalSize; |
| 323 return ret; | 338 return ret; |
| 324 } | 339 } |
| 325 | 340 |
| 326 // Need a new super page. | 341 // Need a new super page. |
| (...skipping 48 matching lines...) |
| 375 currentExtent->superPagesEnd += kSuperPageSize; | 390 currentExtent->superPagesEnd += kSuperPageSize; |
| 376 ASSERT(ret >= currentExtent->superPageBase && ret < currentExtent->superPagesEnd); | 391 ASSERT(ret >= currentExtent->superPageBase && ret < currentExtent->superPagesEnd); |
| 377 } | 392 } |
| 378 // By storing the root in every extent metadata object, we have a fast way | 393 // By storing the root in every extent metadata object, we have a fast way |
| 379 // to go from a pointer within the partition to the root object. | 394 // to go from a pointer within the partition to the root object. |
| 380 latestExtent->root = root; | 395 latestExtent->root = root; |
| 381 | 396 |
| 382 return ret; | 397 return ret; |
| 383 } | 398 } |
| 384 | 399 |
| 385 static ALWAYS_INLINE void partitionUnusePage(PartitionPage* page) | 400 static ALWAYS_INLINE void partitionUnusePage(PartitionRootBase* root, PartitionPage* page) |
| 386 { | 401 { |
| 387 ASSERT(page->bucket->numSystemPagesPerSlotSpan); | 402 ASSERT(page->bucket->numSystemPagesPerSlotSpan); |
| 388 void* addr = partitionPageToPointer(page); | 403 void* addr = partitionPageToPointer(page); |
| 389 decommitSystemPages(addr, page->bucket->numSystemPagesPerSlotSpan * kSystemPageSize); | 404 partitionDecommitSystemPages(root, addr, page->bucket->numSystemPagesPerSlotSpan * kSystemPageSize); |
| 390 } | 405 } |
| 391 | 406 |
| 392 static ALWAYS_INLINE size_t partitionBucketSlots(const PartitionBucket* bucket) | 407 static ALWAYS_INLINE size_t partitionBucketSlots(const PartitionBucket* bucket) |
| 393 { | 408 { |
| 394 return (bucket->numSystemPagesPerSlotSpan * kSystemPageSize) / bucket->slotSize; | 409 return (bucket->numSystemPagesPerSlotSpan * kSystemPageSize) / bucket->slotSize; |
| 395 } | 410 } |
| 396 | 411 |
| 397 static ALWAYS_INLINE size_t partitionBucketPartitionPages(const PartitionBucket* bucket) | 412 static ALWAYS_INLINE size_t partitionBucketPartitionPages(const PartitionBucket* bucket) |
| 398 { | 413 { |
| 399 return (bucket->numSystemPagesPerSlotSpan + (kNumSystemPagesPerPartitionPage - 1)) / kNumSystemPagesPerPartitionPage; | 414 return (bucket->numSystemPagesPerSlotSpan + (kNumSystemPagesPerPartitionPage - 1)) / kNumSystemPagesPerPartitionPage; |
| (...skipping 268 matching lines...) |
| 668 // Second, look in our list of freed but reserved pages. | 683 // Second, look in our list of freed but reserved pages. |
| 669 PartitionPage* newPage = bucket->freePagesHead; | 684 PartitionPage* newPage = bucket->freePagesHead; |
| 670 if (LIKELY(newPage != 0)) { | 685 if (LIKELY(newPage != 0)) { |
| 671 ASSERT(newPage != &PartitionRootGeneric::gSeedPage); | 686 ASSERT(newPage != &PartitionRootGeneric::gSeedPage); |
| 672 ASSERT(!newPage->freelistHead); | 687 ASSERT(!newPage->freelistHead); |
| 673 ASSERT(!newPage->numAllocatedSlots); | 688 ASSERT(!newPage->numAllocatedSlots); |
| 674 ASSERT(!newPage->numUnprovisionedSlots); | 689 ASSERT(!newPage->numUnprovisionedSlots); |
| 675 ASSERT(newPage->freeCacheIndex == -1); | 690 ASSERT(newPage->freeCacheIndex == -1); |
| 676 bucket->freePagesHead = newPage->nextPage; | 691 bucket->freePagesHead = newPage->nextPage; |
| 677 void* addr = partitionPageToPointer(newPage); | 692 void* addr = partitionPageToPointer(newPage); |
| 678 recommitSystemPages(addr, newPage->bucket->numSystemPagesPerSlotSpan * kSystemPageSize); | 693 partitionRecommitSystemPages(root, addr, newPage->bucket->numSystemPagesPerSlotSpan * kSystemPageSize); |
| 679 } else { | 694 } else { |
| 680 // Third. If we get here, we need a brand new page. | 695 // Third. If we get here, we need a brand new page. |
| 681 size_t numPartitionPages = partitionBucketPartitionPages(bucket); | 696 size_t numPartitionPages = partitionBucketPartitionPages(bucket); |
| 682 void* rawNewPage = partitionAllocPartitionPages(root, flags, numPartitionPages); | 697 void* rawNewPage = partitionAllocPartitionPages(root, flags, numPartitionPages); |
| 683 if (UNLIKELY(!rawNewPage)) { | 698 if (UNLIKELY(!rawNewPage)) { |
| 684 ASSERT(returnNull); | 699 ASSERT(returnNull); |
| 685 return 0; | 700 return 0; |
| 686 } | 701 } |
| 687 // Skip the alignment check because it depends on page->bucket, which is not yet set. | 702 // Skip the alignment check because it depends on page->bucket, which is not yet set. |
| 688 newPage = partitionPointerToPageNoAlignmentCheck(rawNewPage); | 703 newPage = partitionPointerToPageNoAlignmentCheck(rawNewPage); |
| 689 } | 704 } |
| 690 | 705 |
| 691 partitionPageReset(newPage, bucket); | 706 partitionPageReset(newPage, bucket); |
| 692 bucket->activePagesHead = newPage; | 707 bucket->activePagesHead = newPage; |
| 693 return partitionPageAllocAndFillFreelist(newPage); | 708 return partitionPageAllocAndFillFreelist(newPage); |
| 694 } | 709 } |
| 695 | 710 |
| 696 static ALWAYS_INLINE void partitionFreePage(PartitionPage* page) | 711 static ALWAYS_INLINE void partitionFreePage(PartitionRootBase* root, PartitionPage* page) |
| 697 { | 712 { |
| 698 ASSERT(page->freelistHead); | 713 ASSERT(page->freelistHead); |
| 699 ASSERT(!page->numAllocatedSlots); | 714 ASSERT(!page->numAllocatedSlots); |
| 700 partitionUnusePage(page); | 715 partitionUnusePage(root, page); |
| 701 // We actually leave the freed page in the active list. We'll sweep it on | 716 // We actually leave the freed page in the active list. We'll sweep it on |
| 702 // to the free page list when we next walk the active page list. Pulling | 717 // to the free page list when we next walk the active page list. Pulling |
| 703 // this trick enables us to use a singly-linked page list for all cases, | 718 // this trick enables us to use a singly-linked page list for all cases, |
| 704 // which is critical in keeping the page metadata structure down to 32 | 719 // which is critical in keeping the page metadata structure down to 32 |
| 705 // bytes in size. | 720 // bytes in size. |
| 706 page->freelistHead = 0; | 721 page->freelistHead = 0; |
| 707 page->numUnprovisionedSlots = 0; | 722 page->numUnprovisionedSlots = 0; |
| 708 } | 723 } |
| 709 | 724 |
| 710 static ALWAYS_INLINE void partitionRegisterEmptyPage(PartitionPage* page) | 725 static ALWAYS_INLINE void partitionRegisterEmptyPage(PartitionPage* page) |
| 711 { | 726 { |
| 712 PartitionRootBase* root = partitionPageToRoot(page); | 727 PartitionRootBase* root = partitionPageToRoot(page); |
| 728 |
| 713 // If the page is already registered as empty, give it another life. | 729 // If the page is already registered as empty, give it another life. |
| 714 if (page->freeCacheIndex != -1) { | 730 if (page->freeCacheIndex != -1) { |
| 715 ASSERT(page->freeCacheIndex >= 0); | 731 ASSERT(page->freeCacheIndex >= 0); |
| 716 ASSERT(static_cast<unsigned>(page->freeCacheIndex) < kMaxFreeableSpans); | 732 ASSERT(static_cast<unsigned>(page->freeCacheIndex) < kMaxFreeableSpans); |
| 717 ASSERT(root->globalEmptyPageRing[page->freeCacheIndex] == page); | 733 ASSERT(root->globalEmptyPageRing[page->freeCacheIndex] == page); |
| 718 root->globalEmptyPageRing[page->freeCacheIndex] = 0; | 734 root->globalEmptyPageRing[page->freeCacheIndex] = 0; |
| 719 } | 735 } |
| 720 | 736 |
| 721 size_t currentIndex = root->globalEmptyPageRingIndex; | 737 size_t currentIndex = root->globalEmptyPageRingIndex; |
| 722 PartitionPage* pageToFree = root->globalEmptyPageRing[currentIndex]; | 738 PartitionPage* pageToFree = root->globalEmptyPageRing[currentIndex]; |
| 723 // The page might well have been re-activated, filled up, etc. before we get | 739 // The page might well have been re-activated, filled up, etc. before we get |
| 724 // around to looking at it here. | 740 // around to looking at it here. |
| 725 if (pageToFree) { | 741 if (pageToFree) { |
| 726 ASSERT(pageToFree != &PartitionRootBase::gSeedPage); | 742 ASSERT(pageToFree != &PartitionRootBase::gSeedPage); |
| 727 ASSERT(pageToFree->freeCacheIndex >= 0); | 743 ASSERT(pageToFree->freeCacheIndex >= 0); |
| 728 ASSERT(static_cast<unsigned>(pageToFree->freeCacheIndex) < kMaxFreeableSpans); | 744 ASSERT(static_cast<unsigned>(pageToFree->freeCacheIndex) < kMaxFreeableSpans); |
| 729 ASSERT(pageToFree == root->globalEmptyPageRing[pageToFree->freeCacheIndex]); | 745 ASSERT(pageToFree == root->globalEmptyPageRing[pageToFree->freeCacheIndex]); |
| 730 if (!pageToFree->numAllocatedSlots && pageToFree->freelistHead) { | 746 if (!pageToFree->numAllocatedSlots && pageToFree->freelistHead) { |
| 731 // The page is still empty, and not freed, so _really_ free it. | 747 // The page is still empty, and not freed, so _really_ free it. |
| 732 partitionFreePage(pageToFree); | 748 partitionFreePage(root, pageToFree); |
| 733 } | 749 } |
| 734 pageToFree->freeCacheIndex = -1; | 750 pageToFree->freeCacheIndex = -1; |
| 735 } | 751 } |
| 736 | 752 |
| 737 // We put the empty slot span on our global list of "pages that were once | 753 // We put the empty slot span on our global list of "pages that were once |
| 738 // empty". thus providing it a bit of breathing room to get re-used before | 754 // empty". thus providing it a bit of breathing room to get re-used before |
| 739 // we really free it. This improves performance, particularly on Mac OS X | 755 // we really free it. This improves performance, particularly on Mac OS X |
| 740 // which has subpar memory management performance. | 756 // which has subpar memory management performance. |
| 741 root->globalEmptyPageRing[currentIndex] = page; | 757 root->globalEmptyPageRing[currentIndex] = page; |
| 742 page->freeCacheIndex = currentIndex; | 758 page->freeCacheIndex = currentIndex; |
| 743 ++currentIndex; | 759 ++currentIndex; |
| 744 if (currentIndex == kMaxFreeableSpans) | 760 if (currentIndex == kMaxFreeableSpans) |
| 745 currentIndex = 0; | 761 currentIndex = 0; |
| 746 root->globalEmptyPageRingIndex = currentIndex; | 762 root->globalEmptyPageRingIndex = currentIndex; |
| 747 } | 763 } |
| 748 | 764 |
| 749 void partitionFreeSlowPath(PartitionPage* page) | 765 void partitionFreeSlowPath(PartitionPage* page) |
| 750 { | 766 { |
| 751 PartitionBucket* bucket = page->bucket; | 767 PartitionBucket* bucket = page->bucket; |
| 752 ASSERT(page != &PartitionRootGeneric::gSeedPage); | 768 ASSERT(page != &PartitionRootGeneric::gSeedPage); |
| 753 ASSERT(bucket->activePagesHead != &PartitionRootGeneric::gSeedPage); | 769 ASSERT(bucket->activePagesHead != &PartitionRootGeneric::gSeedPage); |
| 754 if (LIKELY(page->numAllocatedSlots == 0)) { | 770 if (LIKELY(page->numAllocatedSlots == 0)) { |
| 755 // Page became fully unused. | 771 // Page became fully unused. |
| 756 if (UNLIKELY(partitionBucketIsDirectMapped(bucket))) { | 772 if (UNLIKELY(partitionBucketIsDirectMapped(bucket))) { |
| 757 partitionDirectUnmap(page); | 773 partitionDirectUnmap(page); |
| 758 return; | 774 return; |
| 759 } | 775 } |
| 760 // If it's the current page, attempt to change it. We'd prefer to leave | 776 // If it's the current active page, attempt to change it. We'd prefer to leave |
| 761 // the page empty as a gentle force towards defragmentation. | 777 // the page empty as a gentle force towards defragmentation. |
| 762 if (LIKELY(page == bucket->activePagesHead) && page->nextPage) { | 778 if (LIKELY(page == bucket->activePagesHead) && page->nextPage) { |
| 763 if (partitionSetNewActivePage(page->nextPage)) { | 779 if (partitionSetNewActivePage(page->nextPage)) { |
| 764 ASSERT(bucket->activePagesHead != page); | 780 ASSERT(bucket->activePagesHead != page); |
| 765 // Link the empty page back in after the new current page, to | 781 // Link the empty page back in after the new current page, to |
| 766 // avoid losing a reference to it. | 782 // avoid losing a reference to it. |
| 767 // TODO: consider walking the list to link the empty page after | 783 // TODO: consider walking the list to link the empty page after |
| 768 // all non-empty pages? | 784 // all non-empty pages? |
| 769 PartitionPage* currentPage = bucket->activePagesHead; | 785 PartitionPage* currentPage = bucket->activePagesHead; |
| 770 page->nextPage = currentPage->nextPage; | 786 page->nextPage = currentPage->nextPage; |
| (...skipping 49 matching lines...) |
| 820 if (newSize < currentSize) { | 836 if (newSize < currentSize) { |
| 821 size_t mapSize = partitionPageToDirectMapExtent(page)->mapSize; | 837 size_t mapSize = partitionPageToDirectMapExtent(page)->mapSize; |
| 822 | 838 |
| 823 // Don't reallocate in-place if new size is less than 80 % of the full | 839 // Don't reallocate in-place if new size is less than 80 % of the full |
| 824 // map size, to avoid holding on to too much unused address space. | 840 // map size, to avoid holding on to too much unused address space. |
| 825 if ((newSize / kSystemPageSize) * 5 < (mapSize / kSystemPageSize) * 4) | 841 if ((newSize / kSystemPageSize) * 5 < (mapSize / kSystemPageSize) * 4) |
| 826 return false; | 842 return false; |
| 827 | 843 |
| 828 // Shrink by decommitting unneeded pages and making them inaccessible. | 844 // Shrink by decommitting unneeded pages and making them inaccessible. |
| 829 size_t decommitSize = currentSize - newSize; | 845 size_t decommitSize = currentSize - newSize; |
| 830 decommitSystemPages(charPtr + newSize, decommitSize); | 846 partitionDecommitSystemPages(root, charPtr + newSize, decommitSize); |
| 831 setSystemPagesInaccessible(charPtr + newSize, decommitSize); | 847 setSystemPagesInaccessible(charPtr + newSize, decommitSize); |
| 832 } else if (newSize <= partitionPageToDirectMapExtent(page)->mapSize) { | 848 } else if (newSize <= partitionPageToDirectMapExtent(page)->mapSize) { |
| 833 // Grow within the actually allocated memory. Just need to make the | 849 // Grow within the actually allocated memory. Just need to make the |
| 834 // pages accessible again. | 850 // pages accessible again. |
| 835 size_t recommitSize = newSize - currentSize; | 851 size_t recommitSize = newSize - currentSize; |
| 836 setSystemPagesAccessible(charPtr + currentSize, recommitSize); | 852 setSystemPagesAccessible(charPtr + currentSize, recommitSize); |
| 837 recommitSystemPages(charPtr + currentSize, recommitSize); | 853 partitionRecommitSystemPages(root, charPtr + currentSize, recommitSize); |
| 838 | 854 |
| 839 #ifndef NDEBUG | 855 #ifndef NDEBUG |
| 840 memset(charPtr + currentSize, kUninitializedByte, recommitSize); | 856 memset(charPtr + currentSize, kUninitializedByte, recommitSize); |
| 841 #endif | 857 #endif |
| 842 } else { | 858 } else { |
| 843 // We can't perform the realloc in-place. | 859 // We can't perform the realloc in-place. |
| 844 // TODO: support this too when possible. | 860 // TODO: support this too when possible. |
| 845 return false; | 861 return false; |
| 846 } | 862 } |
| 847 | 863 |
| (...skipping 112 matching lines...) |
| 960 printf("total live: %zu bytes\n", totalLive); | 976 printf("total live: %zu bytes\n", totalLive); |
| 961 printf("total resident: %zu bytes\n", totalResident); | 977 printf("total resident: %zu bytes\n", totalResident); |
| 962 printf("total freeable: %zu bytes\n", totalFreeable); | 978 printf("total freeable: %zu bytes\n", totalFreeable); |
| 963 fflush(stdout); | 979 fflush(stdout); |
| 964 } | 980 } |
| 965 | 981 |
| 966 #endif // !NDEBUG | 982 #endif // !NDEBUG |
| 967 | 983 |
| 968 } // namespace WTF | 984 } // namespace WTF |
| 969 | 985 |