Chromium Code Reviews| Index: Source/wtf/PartitionAlloc.cpp |
| diff --git a/Source/wtf/PartitionAlloc.cpp b/Source/wtf/PartitionAlloc.cpp |
| index d6fc5a6cdf2ebc3ca117b5909026b9ac4a315369..53937e75015b44d5e1480199781038b7d0c01d6b 100644 |
| --- a/Source/wtf/PartitionAlloc.cpp |
| +++ b/Source/wtf/PartitionAlloc.cpp |
| @@ -490,6 +490,11 @@ static ALWAYS_INLINE void partitionPageSetup(PartitionPage* page, PartitionBucke |
| } |
| } |
| +static ALWAYS_INLINE size_t partitionRoundUpToSystemPage(size_t size) |
| +{ |
| + return (size + kSystemPageOffsetMask) & kSystemPageBaseMask; |
| +} |
| + |
| static ALWAYS_INLINE char* partitionPageAllocAndFillFreelist(PartitionPage* page) |
| { |
| ASSERT(page != &PartitionRootGeneric::gSeedPage); |
| @@ -511,7 +516,7 @@ static ALWAYS_INLINE char* partitionPageAllocAndFillFreelist(PartitionPage* page |
| // Our goal is to fault as few system pages as possible. We calculate the |
| // page containing the "end" of the returned slot, and then allow freelist |
| // pointers to be written up to the end of that page. |
| - char* subPageLimit = reinterpret_cast<char*>((reinterpret_cast<uintptr_t>(firstFreelistPointer) + kSystemPageOffsetMask) & kSystemPageBaseMask); |
| + char* subPageLimit = reinterpret_cast<char*>(partitionRoundUpToSystemPage(reinterpret_cast<size_t>(firstFreelistPointer))); |
| char* slotsLimit = returnObject + (size * numSlots); |
| char* freelistLimit = subPageLimit; |
| if (UNLIKELY(slotsLimit < freelistLimit)) |
| @@ -728,12 +733,28 @@ static ALWAYS_INLINE void partitionDirectUnmap(PartitionPage* page) |
| freePages(ptr, unmapSize); |
| } |
| +static ALWAYS_INLINE size_t* partitionPageGetRawSizePtr(PartitionPage* page) |
| +{ |
| + // For single-slot buckets which span more than one partition page, we |
| + // have some spare metadata space to store the raw allocation size. We |
| + // can use this to report better statistics. |
| + PartitionBucket* bucket = page->bucket; |
| + if (bucket->slotSize <= kMaxSystemPagesPerSlotSpan * kSystemPageSize) |
| + return nullptr; |
| + |
| + ASSERT(partitionBucketSlots(bucket) == 1); |
| + page++; |
| + return reinterpret_cast<size_t*>(&page->freelistHead); |
| +} |
| + |
| void* partitionAllocSlowPath(PartitionRootBase* root, int flags, size_t size, PartitionBucket* bucket) |
| { |
| // The slow path is called when the freelist is empty. |
| ASSERT(!bucket->activePagesHead->freelistHead); |
| + void* ret = nullptr; |
| PartitionPage* newPage = nullptr; |
| + size_t* rawSizePtr; |
| // For the partitionAllocGeneric API, we have a bunch of buckets marked |
| // as special cases. We bounce them through to the slow path so that we |
| @@ -748,29 +769,25 @@ void* partitionAllocSlowPath(PartitionRootBase* root, int flags, size_t size, Pa |
| return 0; |
| partitionExcessiveAllocationSize(); |
| } |
| - void* ptr = partitionDirectMap(root, flags, size); |
| - if (ptr) |
| - return ptr; |
| + ret = partitionDirectMap(root, flags, size); |
| + if (ret) |
| + return ret; |
| goto partitionAllocSlowPathFailed; |
| } |
| - // First, look for a usable page in the existing active pages list. |
| - // Change active page, accepting the current page as a candidate. |
| if (LIKELY(partitionSetNewActivePage(bucket->activePagesHead))) { |
| + // First, look for a usable page in the existing active pages list. |
| + // Change active page, accepting the current page as a candidate. |
| newPage = bucket->activePagesHead; |
| if (LIKELY(newPage->freelistHead != 0)) { |
| - PartitionFreelistEntry* ret = newPage->freelistHead; |
| - newPage->freelistHead = partitionFreelistMask(ret->next); |
| + PartitionFreelistEntry* entry = newPage->freelistHead; |
| + newPage->freelistHead = partitionFreelistMask(entry->next); |
| newPage->numAllocatedSlots++; |
| - return ret; |
| + ret = entry; |
| } |
| - ASSERT(newPage->numUnprovisionedSlots); |
| - return partitionPageAllocAndFillFreelist(newPage); |
| - } |
| - |
| - // Second, look in our list of freed but reserved pages. |
| - newPage = bucket->emptyPagesHead; |
| - if (LIKELY(newPage != 0)) { |
| + } else if (LIKELY(bucket->emptyPagesHead != nullptr)) { |
| + // Second, look in our list of freed but reserved pages. |
| + newPage = bucket->emptyPagesHead; |
| bucket->emptyPagesHead = newPage->nextPage; |
| void* addr = partitionPageToPointer(newPage); |
| partitionRecommitSystemPages(root, addr, partitionBucketBytes(newPage->bucket)); |
| @@ -787,6 +804,13 @@ void* partitionAllocSlowPath(PartitionRootBase* root, int flags, size_t size, Pa |
| } |
| bucket->activePagesHead = newPage; |
| + |
| + rawSizePtr = partitionPageGetRawSizePtr(newPage); |
| + if (UNLIKELY(rawSizePtr != nullptr)) |
|
haraken
2015/06/06 15:07:54
Given that this is a slow path, we might want to d… [comment truncated in extraction]
|
| + *rawSizePtr = size; |
| + |
| + if (LIKELY(ret != nullptr)) |
|
haraken
2015/06/06 15:07:53
This looks a bit weird. Maybe we can create a helper… [comment truncated in extraction]
|
| + return ret; |
| return partitionPageAllocAndFillFreelist(newPage); |
| partitionAllocSlowPathFailed: |
| @@ -1045,10 +1069,14 @@ static void partitionDumpPageStats(PartitionBucketMemoryStats* statsOut, const P |
| ASSERT(!page->numUnprovisionedSlots); |
| ++statsOut->numDecommittedPages; |
| } else { |
| - statsOut->activeBytes += (page->numAllocatedSlots * statsOut->bucketSlotSize); |
| + size_t* rawSizePtr = partitionPageGetRawSizePtr(const_cast<PartitionPage*>(page)); |
| + if (rawSizePtr) |
| + statsOut->activeBytes += static_cast<uint32_t>(partitionRoundUpToSystemPage(*rawSizePtr)); |
|
haraken
2015/06/06 15:07:54
Help me understand: Why is this more accurate than… [comment truncated in extraction]
|
| + else |
| + statsOut->activeBytes += (page->numAllocatedSlots * statsOut->bucketSlotSize); |
| size_t pageBytesResident = (bucketNumSlots - page->numUnprovisionedSlots) * statsOut->bucketSlotSize; |
| // Round up to system page size. |
| - size_t pageBytesResidentRounded = (pageBytesResident + kSystemPageOffsetMask) & kSystemPageBaseMask; |
| + size_t pageBytesResidentRounded = partitionRoundUpToSystemPage(pageBytesResident); |
| statsOut->residentBytes += pageBytesResidentRounded; |
| if (!page->numAllocatedSlots) { |
| statsOut->freeableBytes += pageBytesResidentRounded; |