Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 472 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 483 | 483 |
| 484 uint16_t numPartitionPages = partitionBucketPartitionPages(bucket); | 484 uint16_t numPartitionPages = partitionBucketPartitionPages(bucket); |
| 485 char* pageCharPtr = reinterpret_cast<char*>(page); | 485 char* pageCharPtr = reinterpret_cast<char*>(page); |
| 486 for (uint16_t i = 1; i < numPartitionPages; ++i) { | 486 for (uint16_t i = 1; i < numPartitionPages; ++i) { |
| 487 pageCharPtr += kPageMetadataSize; | 487 pageCharPtr += kPageMetadataSize; |
| 488 PartitionPage* secondaryPage = reinterpret_cast<PartitionPage*>(pageCharPtr); | 488 PartitionPage* secondaryPage = reinterpret_cast<PartitionPage*>(pageCharPtr); |
| 489 secondaryPage->pageOffset = i; | 489 secondaryPage->pageOffset = i; |
| 490 } | 490 } |
| 491 } | 491 } |
| 492 | 492 |
| 493 static ALWAYS_INLINE size_t partitionRoundUpToSystemPage(size_t size) | |
| 494 { | |
| 495 return (size + kSystemPageOffsetMask) & kSystemPageBaseMask; | |
| 496 } | |
| 497 | |
| 493 static ALWAYS_INLINE char* partitionPageAllocAndFillFreelist(PartitionPage* page) | 498 static ALWAYS_INLINE char* partitionPageAllocAndFillFreelist(PartitionPage* page) |
| 494 { | 499 { |
| 495 ASSERT(page != &PartitionRootGeneric::gSeedPage); | 500 ASSERT(page != &PartitionRootGeneric::gSeedPage); |
| 496 uint16_t numSlots = page->numUnprovisionedSlots; | 501 uint16_t numSlots = page->numUnprovisionedSlots; |
| 497 ASSERT(numSlots); | 502 ASSERT(numSlots); |
| 498 PartitionBucket* bucket = page->bucket; | 503 PartitionBucket* bucket = page->bucket; |
| 499 // We should only get here when _every_ slot is either used or unprovisioned. | 504 // We should only get here when _every_ slot is either used or unprovisioned. |
| 500 // (The third state is "on the freelist". If we have a non-empty freelist, we should not get here.) | 505 // (The third state is "on the freelist". If we have a non-empty freelist, we should not get here.) |
| 501 ASSERT(numSlots + page->numAllocatedSlots == partitionBucketSlots(bucket)); | 506 ASSERT(numSlots + page->numAllocatedSlots == partitionBucketSlots(bucket)); |
| 502 // Similarly, make explicitly sure that the freelist is empty. | 507 // Similarly, make explicitly sure that the freelist is empty. |
| 503 ASSERT(!page->freelistHead); | 508 ASSERT(!page->freelistHead); |
| 504 ASSERT(page->numAllocatedSlots >= 0); | 509 ASSERT(page->numAllocatedSlots >= 0); |
| 505 | 510 |
| 506 size_t size = bucket->slotSize; | 511 size_t size = bucket->slotSize; |
| 507 char* base = reinterpret_cast<char*>(partitionPageToPointer(page)); | 512 char* base = reinterpret_cast<char*>(partitionPageToPointer(page)); |
| 508 char* returnObject = base + (size * page->numAllocatedSlots); | 513 char* returnObject = base + (size * page->numAllocatedSlots); |
| 509 char* firstFreelistPointer = returnObject + size; | 514 char* firstFreelistPointer = returnObject + size; |
| 510 char* firstFreelistPointerExtent = firstFreelistPointer + sizeof(PartitionFreelistEntry*); | 515 char* firstFreelistPointerExtent = firstFreelistPointer + sizeof(PartitionFreelistEntry*); |
| 511 // Our goal is to fault as few system pages as possible. We calculate the | 516 // Our goal is to fault as few system pages as possible. We calculate the |
| 512 // page containing the "end" of the returned slot, and then allow freelist | 517 // page containing the "end" of the returned slot, and then allow freelist |
| 513 // pointers to be written up to the end of that page. | 518 // pointers to be written up to the end of that page. |
| 514 char* subPageLimit = reinterpret_cast<char*>((reinterpret_cast<uintptr_t>(firstFreelistPointer) + kSystemPageOffsetMask) & kSystemPageBaseMask); | 519 char* subPageLimit = reinterpret_cast<char*>(partitionRoundUpToSystemPage(reinterpret_cast<size_t>(firstFreelistPointer))); |
| 515 char* slotsLimit = returnObject + (size * numSlots); | 520 char* slotsLimit = returnObject + (size * numSlots); |
| 516 char* freelistLimit = subPageLimit; | 521 char* freelistLimit = subPageLimit; |
| 517 if (UNLIKELY(slotsLimit < freelistLimit)) | 522 if (UNLIKELY(slotsLimit < freelistLimit)) |
| 518 freelistLimit = slotsLimit; | 523 freelistLimit = slotsLimit; |
| 519 | 524 |
| 520 uint16_t numNewFreelistEntries = 0; | 525 uint16_t numNewFreelistEntries = 0; |
| 521 if (LIKELY(firstFreelistPointerExtent <= freelistLimit)) { | 526 if (LIKELY(firstFreelistPointerExtent <= freelistLimit)) { |
| 522 // Only consider used space in the slot span. If we consider wasted | 527 // Only consider used space in the slot span. If we consider wasted |
| 523 // space, we may get an off-by-one when a freelist pointer fits in the | 528 // space, we may get an off-by-one when a freelist pointer fits in the |
| 524 // wasted space, but a slot does not. | 529 // wasted space, but a slot does not. |
| (...skipping 196 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 721 ASSERT(!(unmapSize & kPageAllocationGranularityOffsetMask)); | 726 ASSERT(!(unmapSize & kPageAllocationGranularityOffsetMask)); |
| 722 | 727 |
| 723 char* ptr = reinterpret_cast<char*>(partitionPageToPointer(page)); | 728 char* ptr = reinterpret_cast<char*>(partitionPageToPointer(page)); |
| 724 // Account for the mapping starting a partition page before the actual | 729 // Account for the mapping starting a partition page before the actual |
| 725 // allocation address. | 730 // allocation address. |
| 726 ptr -= kPartitionPageSize; | 731 ptr -= kPartitionPageSize; |
| 727 | 732 |
| 728 freePages(ptr, unmapSize); | 733 freePages(ptr, unmapSize); |
| 729 } | 734 } |
| 730 | 735 |
| 736 static ALWAYS_INLINE size_t* partitionPageGetRawSizePtr(PartitionPage* page) | |
| 737 { | |
| 738 // For single-slot buckets which span more than one partition page, we | |
| 739 // have some spare metadata space to store the raw allocation size. We | |
| 740 // can use this to report better statistics. | |
| 741 PartitionBucket* bucket = page->bucket; | |
| 742 if (bucket->slotSize <= kMaxSystemPagesPerSlotSpan * kSystemPageSize) | |
| 743 return nullptr; | |
| 744 | |
| 745 ASSERT(partitionBucketSlots(bucket) == 1); | |
| 746 page++; | |
| 747 return reinterpret_cast<size_t*>(&page->freelistHead); | |
| 748 } | |
| 749 | |
| 731 void* partitionAllocSlowPath(PartitionRootBase* root, int flags, size_t size, PartitionBucket* bucket) | 750 void* partitionAllocSlowPath(PartitionRootBase* root, int flags, size_t size, PartitionBucket* bucket) |
| 732 { | 751 { |
| 733 // The slow path is called when the freelist is empty. | 752 // The slow path is called when the freelist is empty. |
| 734 ASSERT(!bucket->activePagesHead->freelistHead); | 753 ASSERT(!bucket->activePagesHead->freelistHead); |
| 735 | 754 |
| 755 void* ret = nullptr; | |
| 736 PartitionPage* newPage = nullptr; | 756 PartitionPage* newPage = nullptr; |
| 757 size_t* rawSizePtr; | |
| 737 | 758 |
| 738 // For the partitionAllocGeneric API, we have a bunch of buckets marked | 759 // For the partitionAllocGeneric API, we have a bunch of buckets marked |
| 739 // as special cases. We bounce them through to the slow path so that we | 760 // as special cases. We bounce them through to the slow path so that we |
| 740 // can still have a blazing fast hot path due to lack of corner-case | 761 // can still have a blazing fast hot path due to lack of corner-case |
| 741 // branches. | 762 // branches. |
| 742 bool returnNull = flags & PartitionAllocReturnNull; | 763 bool returnNull = flags & PartitionAllocReturnNull; |
| 743 if (UNLIKELY(partitionBucketIsDirectMapped(bucket))) { | 764 if (UNLIKELY(partitionBucketIsDirectMapped(bucket))) { |
| 744 ASSERT(size > kGenericMaxBucketed); | 765 ASSERT(size > kGenericMaxBucketed); |
| 745 ASSERT(bucket == &PartitionRootBase::gPagedBucket); | 766 ASSERT(bucket == &PartitionRootBase::gPagedBucket); |
| 746 if (size > kGenericMaxDirectMapped) { | 767 if (size > kGenericMaxDirectMapped) { |
| 747 if (returnNull) | 768 if (returnNull) |
| 748 return 0; | 769 return 0; |
| 749 partitionExcessiveAllocationSize(); | 770 partitionExcessiveAllocationSize(); |
| 750 } | 771 } |
| 751 void* ptr = partitionDirectMap(root, flags, size); | 772 ret = partitionDirectMap(root, flags, size); |
| 752 if (ptr) | 773 if (ret) |
| 753 return ptr; | 774 return ret; |
| 754 goto partitionAllocSlowPathFailed; | 775 goto partitionAllocSlowPathFailed; |
| 755 } | 776 } |
| 756 | 777 |
| 757 // First, look for a usable page in the existing active pages list. | |
| 758 // Change active page, accepting the current page as a candidate. | |
| 759 if (LIKELY(partitionSetNewActivePage(bucket->activePagesHead))) { | 778 if (LIKELY(partitionSetNewActivePage(bucket->activePagesHead))) { |
| 779 // First, look for a usable page in the existing active pages list. | |
| 780 // Change active page, accepting the current page as a candidate. | |
| 760 newPage = bucket->activePagesHead; | 781 newPage = bucket->activePagesHead; |
| 761 if (LIKELY(newPage->freelistHead != 0)) { | 782 if (LIKELY(newPage->freelistHead != 0)) { |
| 762 PartitionFreelistEntry* ret = newPage->freelistHead; | 783 PartitionFreelistEntry* entry = newPage->freelistHead; |
| 763 newPage->freelistHead = partitionFreelistMask(ret->next); | 784 newPage->freelistHead = partitionFreelistMask(entry->next); |
| 764 newPage->numAllocatedSlots++; | 785 newPage->numAllocatedSlots++; |
| 765 return ret; | 786 ret = entry; |
| 766 } | 787 } |
| 767 ASSERT(newPage->numUnprovisionedSlots); | 788 } else if (LIKELY(bucket->emptyPagesHead != nullptr)) { |
| 768 return partitionPageAllocAndFillFreelist(newPage); | 789 // Second, look in our list of freed but reserved pages. |
| 769 } | 790 newPage = bucket->emptyPagesHead; |
| 770 | |
| 771 // Second, look in our list of freed but reserved pages. | |
| 772 newPage = bucket->emptyPagesHead; | |
| 773 if (LIKELY(newPage != 0)) { | |
| 774 bucket->emptyPagesHead = newPage->nextPage; | 791 bucket->emptyPagesHead = newPage->nextPage; |
| 775 void* addr = partitionPageToPointer(newPage); | 792 void* addr = partitionPageToPointer(newPage); |
| 776 partitionRecommitSystemPages(root, addr, partitionBucketBytes(newPage->bucket)); | 793 partitionRecommitSystemPages(root, addr, partitionBucketBytes(newPage->bucket)); |
| 777 partitionPageReset(newPage, bucket); | 794 partitionPageReset(newPage, bucket); |
| 778 } else { | 795 } else { |
| 779 // Third. If we get here, we need a brand new page. | 796 // Third. If we get here, we need a brand new page. |
| 780 uint16_t numPartitionPages = partitionBucketPartitionPages(bucket); | 797 uint16_t numPartitionPages = partitionBucketPartitionPages(bucket); |
| 781 void* rawNewPage = partitionAllocPartitionPages(root, flags, numPartitionPages); | 798 void* rawNewPage = partitionAllocPartitionPages(root, flags, numPartitionPages); |
| 782 if (UNLIKELY(!rawNewPage)) | 799 if (UNLIKELY(!rawNewPage)) |
| 783 goto partitionAllocSlowPathFailed; | 800 goto partitionAllocSlowPathFailed; |
| 784 // Skip the alignment check because it depends on page->bucket, which is not yet set. | 801 // Skip the alignment check because it depends on page->bucket, which is not yet set. |
| 785 newPage = partitionPointerToPageNoAlignmentCheck(rawNewPage); | 802 newPage = partitionPointerToPageNoAlignmentCheck(rawNewPage); |
| 786 partitionPageSetup(newPage, bucket); | 803 partitionPageSetup(newPage, bucket); |
| 787 } | 804 } |
| 788 | 805 |
| 789 bucket->activePagesHead = newPage; | 806 bucket->activePagesHead = newPage; |
| 807 | |
| 808 rawSizePtr = partitionPageGetRawSizePtr(newPage); | |
| 809 if (UNLIKELY(rawSizePtr != nullptr)) | |
|
haraken
2015/06/06 15:07:54
Given that this is a slow path, we might want to d
| |
| 810 *rawSizePtr = size; | |
| 811 | |
| 812 if (LIKELY(ret != nullptr)) | |
|
haraken
2015/06/06 15:07:53
This looks a bit weird. Maybe we can create a help
| |
| 813 return ret; | |
| 790 return partitionPageAllocAndFillFreelist(newPage); | 814 return partitionPageAllocAndFillFreelist(newPage); |
| 791 | 815 |
| 792 partitionAllocSlowPathFailed: | 816 partitionAllocSlowPathFailed: |
| 793 if (returnNull) { | 817 if (returnNull) { |
| 794 // If we get here, we will set the active page to null, which is an | 818 // If we get here, we will set the active page to null, which is an |
| 795 // invalid state. To support continued use of this bucket, we need to | 819 // invalid state. To support continued use of this bucket, we need to |
| 796 // restore a valid state, by setting the active page to the seed page. | 820 // restore a valid state, by setting the active page to the seed page. |
| 797 bucket->activePagesHead = &PartitionRootGeneric::gSeedPage; | 821 bucket->activePagesHead = &PartitionRootGeneric::gSeedPage; |
| 798 return nullptr; | 822 return nullptr; |
| 799 } | 823 } |
| (...skipping 238 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 1038 } | 1062 } |
| 1039 | 1063 |
| 1040 static void partitionDumpPageStats(PartitionBucketMemoryStats* statsOut, const PartitionPage* page) | 1064 static void partitionDumpPageStats(PartitionBucketMemoryStats* statsOut, const PartitionPage* page) |
| 1041 { | 1065 { |
| 1042 uint16_t bucketNumSlots = partitionBucketSlots(page->bucket); | 1066 uint16_t bucketNumSlots = partitionBucketSlots(page->bucket); |
| 1043 | 1067 |
| 1044 if (!page->freelistHead && page->numAllocatedSlots == 0) { | 1068 if (!page->freelistHead && page->numAllocatedSlots == 0) { |
| 1045 ASSERT(!page->numUnprovisionedSlots); | 1069 ASSERT(!page->numUnprovisionedSlots); |
| 1046 ++statsOut->numDecommittedPages; | 1070 ++statsOut->numDecommittedPages; |
| 1047 } else { | 1071 } else { |
| 1048 statsOut->activeBytes += (page->numAllocatedSlots * statsOut->bucketSlotSize); | 1072 size_t* rawSizePtr = partitionPageGetRawSizePtr(const_cast<PartitionPage*>(page)); |
| 1073 if (rawSizePtr) | |
| 1074 statsOut->activeBytes += static_cast<uint32_t>(partitionRoundUpToSystemPage(*rawSizePtr)); |
|
haraken
2015/06/06 15:07:54
Help me understand: Why is this more accurate than
| |
| 1075 else | |
| 1076 statsOut->activeBytes += (page->numAllocatedSlots * statsOut->bucketSlotSize); |
| 1049 size_t pageBytesResident = (bucketNumSlots - page->numUnprovisionedSlots) * statsOut->bucketSlotSize; | 1077 size_t pageBytesResident = (bucketNumSlots - page->numUnprovisionedSlots) * statsOut->bucketSlotSize; |
| 1050 // Round up to system page size. | 1078 // Round up to system page size. |
| 1051 size_t pageBytesResidentRounded = (pageBytesResident + kSystemPageOffsetMask) & kSystemPageBaseMask; | 1079 size_t pageBytesResidentRounded = partitionRoundUpToSystemPage(pageBytesResident); |
| 1052 statsOut->residentBytes += pageBytesResidentRounded; | 1080 statsOut->residentBytes += pageBytesResidentRounded; |
| 1053 if (!page->numAllocatedSlots) { | 1081 if (!page->numAllocatedSlots) { |
| 1054 statsOut->freeableBytes += pageBytesResidentRounded; | 1082 statsOut->freeableBytes += pageBytesResidentRounded; |
| 1055 ++statsOut->numEmptyPages; | 1083 ++statsOut->numEmptyPages; |
| 1056 } else if (page->numAllocatedSlots == bucketNumSlots) { | 1084 } else if (page->numAllocatedSlots == bucketNumSlots) { |
| 1057 ++statsOut->numFullPages; | 1085 ++statsOut->numFullPages; |
| 1058 } else { | 1086 } else { |
| 1059 ++statsOut->numActivePages; | 1087 ++statsOut->numActivePages; |
| 1060 } | 1088 } |
| 1061 } | 1089 } |
| (...skipping 103 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 1165 // partitionsDumpBucketStats is called after collecting stats because it | 1193 // partitionsDumpBucketStats is called after collecting stats because it |
| 1166 // can use PartitionAlloc to allocate and this can affect the statistics. | 1194 // can use PartitionAlloc to allocate and this can affect the statistics. |
| 1167 for (size_t i = 0; i < partitionNumBuckets; ++i) { | 1195 for (size_t i = 0; i < partitionNumBuckets; ++i) { |
| 1168 if (memoryStats[i].isValid) | 1196 if (memoryStats[i].isValid) |
| 1169 partitionStatsDumper->partitionsDumpBucketStats(partitionName, &memoryStats[i]); | 1197 partitionStatsDumper->partitionsDumpBucketStats(partitionName, &memoryStats[i]); |
| 1170 } | 1198 } |
| 1171 } | 1199 } |
| 1172 | 1200 |
| 1173 } // namespace WTF | 1201 } // namespace WTF |
| 1174 | 1202 |
| OLD | NEW |