Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 472 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 483 | 483 |
| 484 uint16_t numPartitionPages = partitionBucketPartitionPages(bucket); | 484 uint16_t numPartitionPages = partitionBucketPartitionPages(bucket); |
| 485 char* pageCharPtr = reinterpret_cast<char*>(page); | 485 char* pageCharPtr = reinterpret_cast<char*>(page); |
| 486 for (uint16_t i = 1; i < numPartitionPages; ++i) { | 486 for (uint16_t i = 1; i < numPartitionPages; ++i) { |
| 487 pageCharPtr += kPageMetadataSize; | 487 pageCharPtr += kPageMetadataSize; |
| 488 PartitionPage* secondaryPage = reinterpret_cast<PartitionPage*>(pageCharPtr); | 488 PartitionPage* secondaryPage = reinterpret_cast<PartitionPage*>(pageCharPtr); |
| 489 secondaryPage->pageOffset = i; | 489 secondaryPage->pageOffset = i; |
| 490 } | 490 } |
| 491 } | 491 } |
| 492 | 492 |
| 493 static ALWAYS_INLINE size_t partitionRoundUpToSystemPage(size_t size) | |
| 494 { | |
| 495 return (size + kSystemPageOffsetMask) & kSystemPageBaseMask; | |
| 496 } | |
| 497 | |
| 493 static ALWAYS_INLINE char* partitionPageAllocAndFillFreelist(PartitionPage* page) | 498 static ALWAYS_INLINE char* partitionPageAllocAndFillFreelist(PartitionPage* page) |
| 494 { | 499 { |
| 495 ASSERT(page != &PartitionRootGeneric::gSeedPage); | 500 ASSERT(page != &PartitionRootGeneric::gSeedPage); |
| 496 uint16_t numSlots = page->numUnprovisionedSlots; | 501 uint16_t numSlots = page->numUnprovisionedSlots; |
| 497 ASSERT(numSlots); | 502 ASSERT(numSlots); |
| 498 PartitionBucket* bucket = page->bucket; | 503 PartitionBucket* bucket = page->bucket; |
| 499 // We should only get here when _every_ slot is either used or unprovisioned. | 504 // We should only get here when _every_ slot is either used or unprovisioned. |
| 500 // (The third state is "on the freelist". If we have a non-empty freelist, we should not get here.) | 505 // (The third state is "on the freelist". If we have a non-empty freelist, we should not get here.) |
| 501 ASSERT(numSlots + page->numAllocatedSlots == partitionBucketSlots(bucket)); | 506 ASSERT(numSlots + page->numAllocatedSlots == partitionBucketSlots(bucket)); |
| 502 // Similarly, make explicitly sure that the freelist is empty. | 507 // Similarly, make explicitly sure that the freelist is empty. |
| 503 ASSERT(!page->freelistHead); | 508 ASSERT(!page->freelistHead); |
| 504 ASSERT(page->numAllocatedSlots >= 0); | 509 ASSERT(page->numAllocatedSlots >= 0); |
| 505 | 510 |
| 506 size_t size = bucket->slotSize; | 511 size_t size = bucket->slotSize; |
| 507 char* base = reinterpret_cast<char*>(partitionPageToPointer(page)); | 512 char* base = reinterpret_cast<char*>(partitionPageToPointer(page)); |
| 508 char* returnObject = base + (size * page->numAllocatedSlots); | 513 char* returnObject = base + (size * page->numAllocatedSlots); |
| 509 char* firstFreelistPointer = returnObject + size; | 514 char* firstFreelistPointer = returnObject + size; |
| 510 char* firstFreelistPointerExtent = firstFreelistPointer + sizeof(PartitionFreelistEntry*); | 515 char* firstFreelistPointerExtent = firstFreelistPointer + sizeof(PartitionFreelistEntry*); |
| 511 // Our goal is to fault as few system pages as possible. We calculate the | 516 // Our goal is to fault as few system pages as possible. We calculate the |
| 512 // page containing the "end" of the returned slot, and then allow freelist | 517 // page containing the "end" of the returned slot, and then allow freelist |
| 513 // pointers to be written up to the end of that page. | 518 // pointers to be written up to the end of that page. |
| 514 char* subPageLimit = reinterpret_cast<char*>((reinterpret_cast<uintptr_t>(firstFreelistPointer) + kSystemPageOffsetMask) & kSystemPageBaseMask); | 519 char* subPageLimit = reinterpret_cast<char*>(partitionRoundUpToSystemPage(reinterpret_cast<size_t>(firstFreelistPointer))); |
| 515 char* slotsLimit = returnObject + (size * numSlots); | 520 char* slotsLimit = returnObject + (size * numSlots); |
| 516 char* freelistLimit = subPageLimit; | 521 char* freelistLimit = subPageLimit; |
| 517 if (UNLIKELY(slotsLimit < freelistLimit)) | 522 if (UNLIKELY(slotsLimit < freelistLimit)) |
| 518 freelistLimit = slotsLimit; | 523 freelistLimit = slotsLimit; |
| 519 | 524 |
| 520 uint16_t numNewFreelistEntries = 0; | 525 uint16_t numNewFreelistEntries = 0; |
| 521 if (LIKELY(firstFreelistPointerExtent <= freelistLimit)) { | 526 if (LIKELY(firstFreelistPointerExtent <= freelistLimit)) { |
| 522 // Only consider used space in the slot span. If we consider wasted | 527 // Only consider used space in the slot span. If we consider wasted |
| 523 // space, we may get an off-by-one when a freelist pointer fits in the | 528 // space, we may get an off-by-one when a freelist pointer fits in the |
| 524 // wasted space, but a slot does not. | 529 // wasted space, but a slot does not. |
| (...skipping 68 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 593 bucket->emptyPagesHead = page; | 598 bucket->emptyPagesHead = page; |
| 594 } else { | 599 } else { |
| 595 // If we get here, we found a full page. Skip over it too, and also | 600 // If we get here, we found a full page. Skip over it too, and also |
| 596 // tag it as full (via a negative value). We need it tagged so that | 601 // tag it as full (via a negative value). We need it tagged so that |
| 597 // free'ing can tell, and move it back into the active page list. | 602 // free'ing can tell, and move it back into the active page list. |
| 598 ASSERT(page->numAllocatedSlots == partitionBucketSlots(bucket)); | 603 ASSERT(page->numAllocatedSlots == partitionBucketSlots(bucket)); |
| 599 page->numAllocatedSlots = -page->numAllocatedSlots; | 604 page->numAllocatedSlots = -page->numAllocatedSlots; |
| 600 ++bucket->numFullPages; | 605 ++bucket->numFullPages; |
| 601 // numFullPages is a uint16_t for efficient packing so guard against | 606 // numFullPages is a uint16_t for efficient packing so guard against |
| 602 // overflow to be safe. | 607 // overflow to be safe. |
| 603 if (!bucket->numFullPages) | 608 if (UNLIKELY(!bucket->numFullPages)) |
| 604 partitionBucketFull(); | 609 partitionBucketFull(); |
| 605 // Not necessary but might help stop accidents. | 610 // Not necessary but might help stop accidents. |
| 606 page->nextPage = 0; | 611 page->nextPage = 0; |
| 607 } | 612 } |
| 608 } | 613 } |
| 609 | 614 |
| 610 bucket->activePagesHead = 0; | 615 bucket->activePagesHead = 0; |
| 611 return false; | 616 return false; |
| 612 } | 617 } |
| 613 | 618 |
| (...skipping 107 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 721 ASSERT(!(unmapSize & kPageAllocationGranularityOffsetMask)); | 726 ASSERT(!(unmapSize & kPageAllocationGranularityOffsetMask)); |
| 722 | 727 |
| 723 char* ptr = reinterpret_cast<char*>(partitionPageToPointer(page)); | 728 char* ptr = reinterpret_cast<char*>(partitionPageToPointer(page)); |
| 724 // Account for the mapping starting a partition page before the actual | 729 // Account for the mapping starting a partition page before the actual |
| 725 // allocation address. | 730 // allocation address. |
| 726 ptr -= kPartitionPageSize; | 731 ptr -= kPartitionPageSize; |
| 727 | 732 |
| 728 freePages(ptr, unmapSize); | 733 freePages(ptr, unmapSize); |
| 729 } | 734 } |
| 730 | 735 |
| 736 static ALWAYS_INLINE size_t* partitionPageGetRawSizePtr(PartitionPage* page) | |
| 737 { | |
| 738 // For single-slot buckets which span more than one partition page, we | |
| 739 // have some spare metadata space to store the raw allocation size. We | |
| 740 // can use this to report better statistics. | |
| 741 PartitionBucket* bucket = page->bucket; | |
| 742 if (bucket->slotSize <= kMaxSystemPagesPerSlotSpan * kSystemPageSize) | |
| 743 return nullptr; | |
| 744 | |
| 745 ASSERT(partitionBucketSlots(bucket) == 1); | |
| 746 page++; | |
| 747 return reinterpret_cast<size_t*>(&page->freelistHead); | |
| 748 } | |
| 749 | |
| 750 static ALWAYS_INLINE void partitionPageSetRawSize(PartitionPage* page, size_t size) | |
| 751 { | |
| 752 size_t* rawSizePtr = partitionPageGetRawSizePtr(page); | |
| 753 if (UNLIKELY(rawSizePtr != nullptr)) | |
| 754 *rawSizePtr = size; | |
| 755 } | |
| 756 | |
| 731 void* partitionAllocSlowPath(PartitionRootBase* root, int flags, size_t size, PartitionBucket* bucket) | 757 void* partitionAllocSlowPath(PartitionRootBase* root, int flags, size_t size, PartitionBucket* bucket) |
| 732 { | 758 { |
| 733 // The slow path is called when the freelist is empty. | 759 // The slow path is called when the freelist is empty. |
| 734 ASSERT(!bucket->activePagesHead->freelistHead); | 760 ASSERT(!bucket->activePagesHead->freelistHead); |
| 735 | 761 |
| 736 PartitionPage* newPage = nullptr; | 762 PartitionPage* newPage = nullptr; |
| 737 | 763 |
| 738 // For the partitionAllocGeneric API, we have a bunch of buckets marked | 764 // For the partitionAllocGeneric API, we have a bunch of buckets marked |
| 739 // as special cases. We bounce them through to the slow path so that we | 765 // as special cases. We bounce them through to the slow path so that we |
| 740 // can still have a blazing fast hot path due to lack of corner-case | 766 // can still have a blazing fast hot path due to lack of corner-case |
| 741 // branches. | 767 // branches. |
| 742 bool returnNull = flags & PartitionAllocReturnNull; | 768 bool returnNull = flags & PartitionAllocReturnNull; |
| 743 if (UNLIKELY(partitionBucketIsDirectMapped(bucket))) { | 769 if (UNLIKELY(partitionBucketIsDirectMapped(bucket))) { |
| 744 ASSERT(size > kGenericMaxBucketed); | 770 ASSERT(size > kGenericMaxBucketed); |
| 745 ASSERT(bucket == &PartitionRootBase::gPagedBucket); | 771 ASSERT(bucket == &PartitionRootBase::gPagedBucket); |
| 746 if (size > kGenericMaxDirectMapped) { | 772 if (size > kGenericMaxDirectMapped) { |
| 747 if (returnNull) | 773 if (returnNull) |
| 748 return 0; | 774 return 0; |
| 749 partitionExcessiveAllocationSize(); | 775 partitionExcessiveAllocationSize(); |
| 750 } | 776 } |
| 751 void* ptr = partitionDirectMap(root, flags, size); | 777 void* ret = partitionDirectMap(root, flags, size); |
| 752 if (ptr) | 778 if (ret) |
| 753 return ptr; | 779 return ret; |
| 754 goto partitionAllocSlowPathFailed; | 780 goto partitionAllocSlowPathFailed; |
| 755 } | 781 } |
| 756 | 782 |
| 757 // First, look for a usable page in the existing active pages list. | |
| 758 // Change active page, accepting the current page as a candidate. | |
| 759 if (LIKELY(partitionSetNewActivePage(bucket->activePagesHead))) { | 783 if (LIKELY(partitionSetNewActivePage(bucket->activePagesHead))) { |
| 784 // First, look for a usable page in the existing active pages list. | |
| 785 // Change active page, accepting the current page as a candidate. | |
| 760 newPage = bucket->activePagesHead; | 786 newPage = bucket->activePagesHead; |
| 761 if (LIKELY(newPage->freelistHead != 0)) { | 787 if (LIKELY(newPage->freelistHead != 0)) { |
| 762 PartitionFreelistEntry* ret = newPage->freelistHead; | 788 PartitionFreelistEntry* entry = newPage->freelistHead; |
| 763 newPage->freelistHead = partitionFreelistMask(ret->next); | 789 newPage->freelistHead = partitionFreelistMask(entry->next); |
| 764 newPage->numAllocatedSlots++; | 790 newPage->numAllocatedSlots++; |
| 765 return ret; | 791 partitionPageSetRawSize(newPage, size); |
| 792 return entry; | |
| 766 } | 793 } |
| 767 ASSERT(newPage->numUnprovisionedSlots); | 794 } else if (LIKELY(bucket->emptyPagesHead != nullptr)) { |
| 768 return partitionPageAllocAndFillFreelist(newPage); | 795 // Second, look in our list of freed but reserved pages. |
| 769 } | 796 newPage = bucket->emptyPagesHead; |
| 770 | |
| 771 // Second, look in our list of freed but reserved pages. | |
| 772 newPage = bucket->emptyPagesHead; | |
| 773 if (LIKELY(newPage != 0)) { | |
| 774 bucket->emptyPagesHead = newPage->nextPage; | 797 bucket->emptyPagesHead = newPage->nextPage; |
| 775 void* addr = partitionPageToPointer(newPage); | 798 void* addr = partitionPageToPointer(newPage); |
| 776 partitionRecommitSystemPages(root, addr, partitionBucketBytes(newPage->bucket)); | 799 partitionRecommitSystemPages(root, addr, partitionBucketBytes(newPage->bucket)); |
| 777 partitionPageReset(newPage, bucket); | 800 partitionPageReset(newPage, bucket); |
| 778 } else { | 801 } else { |
| 779 // Third. If we get here, we need a brand new page. | 802 // Third. If we get here, we need a brand new page. |
| 780 uint16_t numPartitionPages = partitionBucketPartitionPages(bucket); | 803 uint16_t numPartitionPages = partitionBucketPartitionPages(bucket); |
| 781 void* rawNewPage = partitionAllocPartitionPages(root, flags, numPartitionPages); | 804 void* rawNewPage = partitionAllocPartitionPages(root, flags, numPartitionPages); |
| 782 if (UNLIKELY(!rawNewPage)) | 805 if (UNLIKELY(!rawNewPage)) |
| 783 goto partitionAllocSlowPathFailed; | 806 goto partitionAllocSlowPathFailed; |
| 784 // Skip the alignment check because it depends on page->bucket, which is not yet set. | 807 // Skip the alignment check because it depends on page->bucket, which is not yet set. |
| 785 newPage = partitionPointerToPageNoAlignmentCheck(rawNewPage); | 808 newPage = partitionPointerToPageNoAlignmentCheck(rawNewPage); |
| 786 partitionPageSetup(newPage, bucket); | 809 partitionPageSetup(newPage, bucket); |
| 787 } | 810 } |
| 788 | 811 |
| 789 bucket->activePagesHead = newPage; | 812 bucket->activePagesHead = newPage; |
| 813 partitionPageSetRawSize(newPage, size); | |
| 814 | |
| 790 return partitionPageAllocAndFillFreelist(newPage); | 815 return partitionPageAllocAndFillFreelist(newPage); |
| 791 | 816 |
| 792 partitionAllocSlowPathFailed: | 817 partitionAllocSlowPathFailed: |
| 793 if (returnNull) { | 818 if (returnNull) { |
| 794 // If we get here, we will set the active page to null, which is an | 819 // If we get here, we will set the active page to null, which is an |
| 795 // invalid state. To support continued use of this bucket, we need to | 820 // invalid state. To support continued use of this bucket, we need to |
| 796 // restore a valid state, by setting the active page to the seed page. | 821 // restore a valid state, by setting the active page to the seed page. |
| 797 bucket->activePagesHead = &PartitionRootGeneric::gSeedPage; | 822 bucket->activePagesHead = &PartitionRootGeneric::gSeedPage; |
| 798 return nullptr; | 823 return nullptr; |
| 799 } | 824 } |
| (...skipping 238 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 1038 } | 1063 } |
| 1039 | 1064 |
| 1040 static void partitionDumpPageStats(PartitionBucketMemoryStats* statsOut, const PartitionPage* page) | 1065 static void partitionDumpPageStats(PartitionBucketMemoryStats* statsOut, const PartitionPage* page) |
| 1041 { | 1066 { |
| 1042 uint16_t bucketNumSlots = partitionBucketSlots(page->bucket); | 1067 uint16_t bucketNumSlots = partitionBucketSlots(page->bucket); |
| 1043 | 1068 |
| 1044 if (!page->freelistHead && page->numAllocatedSlots == 0) { | 1069 if (!page->freelistHead && page->numAllocatedSlots == 0) { |
| 1045 ASSERT(!page->numUnprovisionedSlots); | 1070 ASSERT(!page->numUnprovisionedSlots); |
| 1046 ++statsOut->numDecommittedPages; | 1071 ++statsOut->numDecommittedPages; |
| 1047 } else { | 1072 } else { |
| 1048 statsOut->activeBytes += (page->numAllocatedSlots * statsOut->bucketSlotSize); | 1073 size_t* rawSizePtr = partitionPageGetRawSizePtr(const_cast<PartitionPage*>(page)); |
|
haraken
2015/06/10 00:54:01
Nit: It looks a bit tidier to introduce partitionPageGetRawSize()
| |
| 1074 if (rawSizePtr) | |
| 1075 statsOut->activeBytes += static_cast<uint32_t>(partitionRoundUpToSys temPage(*rawSizePtr)); | |
| 1076 else | |
| 1077 statsOut->activeBytes += (page->numAllocatedSlots * statsOut->bucketSlotSize); | |
| 1049 size_t pageBytesResident = (bucketNumSlots - page->numUnprovisionedSlots) * statsOut->bucketSlotSize; | 1078 size_t pageBytesResident = (bucketNumSlots - page->numUnprovisionedSlots) * statsOut->bucketSlotSize; |
| 1050 // Round up to system page size. | 1079 // Round up to system page size. |
| 1051 size_t pageBytesResidentRounded = (pageBytesResident + kSystemPageOffsetMask) & kSystemPageBaseMask; | 1080 size_t pageBytesResidentRounded = partitionRoundUpToSystemPage(pageBytesResident); |
| 1052 statsOut->residentBytes += pageBytesResidentRounded; | 1081 statsOut->residentBytes += pageBytesResidentRounded; |
| 1053 if (!page->numAllocatedSlots) { | 1082 if (!page->numAllocatedSlots) { |
| 1054 statsOut->freeableBytes += pageBytesResidentRounded; | 1083 statsOut->freeableBytes += pageBytesResidentRounded; |
| 1055 ++statsOut->numEmptyPages; | 1084 ++statsOut->numEmptyPages; |
| 1056 } else if (page->numAllocatedSlots == bucketNumSlots) { | 1085 } else if (page->numAllocatedSlots == bucketNumSlots) { |
| 1057 ++statsOut->numFullPages; | 1086 ++statsOut->numFullPages; |
| 1058 } else { | 1087 } else { |
| 1059 ++statsOut->numActivePages; | 1088 ++statsOut->numActivePages; |
| 1060 } | 1089 } |
| 1061 } | 1090 } |
| (...skipping 103 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 1169 partitionStatsDumper->partitionsDumpBucketStats(partitionName, &memoryStats[i]); | 1198 partitionStatsDumper->partitionsDumpBucketStats(partitionName, &memoryStats[i]); |
| 1166 // can use PartitionAlloc to allocate and this can affect the statistics. | 1195 // can use PartitionAlloc to allocate and this can affect the statistics. |
| 1167 for (size_t i = 0; i < partitionNumBuckets; ++i) { | 1196 for (size_t i = 0; i < partitionNumBuckets; ++i) { |
| 1168 if (memoryStats[i].isValid) | 1197 if (memoryStats[i].isValid) |
| 1169 partitionStatsDumper->partitionsDumpBucketStats(partitionName, &memo ryStats[i]); | 1198 partitionStatsDumper->partitionsDumpBucketStats(partitionName, &memo ryStats[i]); |
| 1170 } | 1199 } |
| 1171 } | 1200 } |
| 1172 | 1201 |
| 1173 } // namespace WTF | 1202 } // namespace WTF |
| 1174 | 1203 |
| OLD | NEW |