OLD | NEW |
1 /* | 1 /* |
2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
6 * met: | 6 * met: |
7 * | 7 * |
8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
(...skipping 82 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
93 bestPages = static_cast<uint16_t>(size / kSystemPageSize); | 93 bestPages = static_cast<uint16_t>(size / kSystemPageSize); |
94 RELEASE_ASSERT(bestPages < (1 << 8)); | 94 RELEASE_ASSERT(bestPages < (1 << 8)); |
95 return static_cast<uint8_t>(bestPages); | 95 return static_cast<uint8_t>(bestPages); |
96 } | 96 } |
97 ASSERT(size <= kMaxSystemPagesPerSlotSpan * kSystemPageSize); | 97 ASSERT(size <= kMaxSystemPagesPerSlotSpan * kSystemPageSize); |
98 for (uint16_t i = kNumSystemPagesPerPartitionPage - 1; | 98 for (uint16_t i = kNumSystemPagesPerPartitionPage - 1; |
99 i <= kMaxSystemPagesPerSlotSpan; ++i) { | 99 i <= kMaxSystemPagesPerSlotSpan; ++i) { |
100 size_t pageSize = kSystemPageSize * i; | 100 size_t pageSize = kSystemPageSize * i; |
101 size_t numSlots = pageSize / size; | 101 size_t numSlots = pageSize / size; |
102 size_t waste = pageSize - (numSlots * size); | 102 size_t waste = pageSize - (numSlots * size); |
103 // Leaving a page unfaulted is not free; the page will occupy an empty page
table entry. | 103 // Leaving a page unfaulted is not free; the page will occupy an empty page |
104 // Make a simple attempt to account for that. | 104 // table entry. Make a simple attempt to account for that. |
105 size_t numRemainderPages = i & (kNumSystemPagesPerPartitionPage - 1); | 105 size_t numRemainderPages = i & (kNumSystemPagesPerPartitionPage - 1); |
106 size_t numUnfaultedPages = | 106 size_t numUnfaultedPages = |
107 numRemainderPages | 107 numRemainderPages |
108 ? (kNumSystemPagesPerPartitionPage - numRemainderPages) | 108 ? (kNumSystemPagesPerPartitionPage - numRemainderPages) |
109 : 0; | 109 : 0; |
110 waste += sizeof(void*) * numUnfaultedPages; | 110 waste += sizeof(void*) * numUnfaultedPages; |
111 double wasteRatio = (double)waste / (double)pageSize; | 111 double wasteRatio = (double)waste / (double)pageSize; |
112 if (wasteRatio < bestWasteRatio) { | 112 if (wasteRatio < bestWasteRatio) { |
113 bestWasteRatio = wasteRatio; | 113 bestWasteRatio = wasteRatio; |
114 bestPages = i; | 114 bestPages = i; |
(...skipping 70 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
185 | 185 |
186 void partitionAllocGenericInit(PartitionRootGeneric* root) { | 186 void partitionAllocGenericInit(PartitionRootGeneric* root) { |
187 SpinLock::Guard guard(root->lock); | 187 SpinLock::Guard guard(root->lock); |
188 | 188 |
189 partitionAllocBaseInit(root); | 189 partitionAllocBaseInit(root); |
190 | 190 |
191 // Precalculate some shift and mask constants used in the hot path. | 191 // Precalculate some shift and mask constants used in the hot path. |
192 // Example: malloc(41) == 101001 binary. | 192 // Example: malloc(41) == 101001 binary. |
193 // Order is 6 (1 << 6-1)==32 is highest bit set. | 193 // Order is 6 (1 << 6-1)==32 is highest bit set. |
194 // orderIndex is the next three MSB == 010 == 2. | 194 // orderIndex is the next three MSB == 010 == 2. |
195 // subOrderIndexMask is a mask for the remaining bits == 11 (masking to 01 for
the subOrderIndex). | 195 // subOrderIndexMask is a mask for the remaining bits == 11 (masking to 01 for |
| 196 // the subOrderIndex). |
196 size_t order; | 197 size_t order; |
197 for (order = 0; order <= kBitsPerSizet; ++order) { | 198 for (order = 0; order <= kBitsPerSizet; ++order) { |
198 size_t orderIndexShift; | 199 size_t orderIndexShift; |
199 if (order < kGenericNumBucketsPerOrderBits + 1) | 200 if (order < kGenericNumBucketsPerOrderBits + 1) |
200 orderIndexShift = 0; | 201 orderIndexShift = 0; |
201 else | 202 else |
202 orderIndexShift = order - (kGenericNumBucketsPerOrderBits + 1); | 203 orderIndexShift = order - (kGenericNumBucketsPerOrderBits + 1); |
203 root->orderIndexShifts[order] = orderIndexShift; | 204 root->orderIndexShifts[order] = orderIndexShift; |
204 size_t subOrderIndexMask; | 205 size_t subOrderIndexMask; |
205 if (order == kBitsPerSizet) { | 206 if (order == kBitsPerSizet) { |
(...skipping 231 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
437 // In this case, we can still hand out pages from the current super page | 438 // In this case, we can still hand out pages from the current super page |
438 // allocation. | 439 // allocation. |
439 char* ret = root->nextPartitionPage; | 440 char* ret = root->nextPartitionPage; |
440 root->nextPartitionPage += totalSize; | 441 root->nextPartitionPage += totalSize; |
441 partitionIncreaseCommittedPages(root, totalSize); | 442 partitionIncreaseCommittedPages(root, totalSize); |
442 return ret; | 443 return ret; |
443 } | 444 } |
444 | 445 |
445 // Need a new super page. We want to allocate super pages in a contiguous | 446 // Need a new super page. We want to allocate super pages in a contiguous |
446 // address region as much as possible. This is important for not causing | 447 // address region as much as possible. This is important for not causing |
447 // page table bloat and not fragmenting address spaces in 32 bit architectures
. | 448 // page table bloat and not fragmenting address spaces in 32 bit |
| 449 // architectures. |
448 char* requestedAddress = root->nextSuperPage; | 450 char* requestedAddress = root->nextSuperPage; |
449 char* superPage = reinterpret_cast<char*>(allocPages( | 451 char* superPage = reinterpret_cast<char*>(allocPages( |
450 requestedAddress, kSuperPageSize, kSuperPageSize, PageAccessible)); | 452 requestedAddress, kSuperPageSize, kSuperPageSize, PageAccessible)); |
451 if (UNLIKELY(!superPage)) | 453 if (UNLIKELY(!superPage)) |
452 return 0; | 454 return 0; |
453 | 455 |
454 root->totalSizeOfSuperPages += kSuperPageSize; | 456 root->totalSizeOfSuperPages += kSuperPageSize; |
455 partitionIncreaseCommittedPages(root, totalSize); | 457 partitionIncreaseCommittedPages(root, totalSize); |
456 | 458 |
457 root->nextSuperPage = superPage + kSuperPageSize; | 459 root->nextSuperPage = superPage + kSuperPageSize; |
(...skipping 42 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
500 ASSERT(!root->firstExtent); | 502 ASSERT(!root->firstExtent); |
501 root->firstExtent = latestExtent; | 503 root->firstExtent = latestExtent; |
502 } else { | 504 } else { |
503 ASSERT(currentExtent->superPageBase); | 505 ASSERT(currentExtent->superPageBase); |
504 currentExtent->next = latestExtent; | 506 currentExtent->next = latestExtent; |
505 } | 507 } |
506 root->currentExtent = latestExtent; | 508 root->currentExtent = latestExtent; |
507 latestExtent->superPageBase = superPage; | 509 latestExtent->superPageBase = superPage; |
508 latestExtent->superPagesEnd = superPage + kSuperPageSize; | 510 latestExtent->superPagesEnd = superPage + kSuperPageSize; |
509 } else { | 511 } else { |
510 // We allocated next to an existing extent so just nudge the size up a littl
e. | 512 // We allocated next to an existing extent so just nudge the size up a |
| 513 // little. |
511 ASSERT(currentExtent->superPagesEnd); | 514 ASSERT(currentExtent->superPagesEnd); |
512 currentExtent->superPagesEnd += kSuperPageSize; | 515 currentExtent->superPagesEnd += kSuperPageSize; |
513 ASSERT(ret >= currentExtent->superPageBase && | 516 ASSERT(ret >= currentExtent->superPageBase && |
514 ret < currentExtent->superPagesEnd); | 517 ret < currentExtent->superPagesEnd); |
515 } | 518 } |
516 return ret; | 519 return ret; |
517 } | 520 } |
518 | 521 |
519 static ALWAYS_INLINE uint16_t | 522 static ALWAYS_INLINE uint16_t |
520 partitionBucketPartitionPages(const PartitionBucket* bucket) { | 523 partitionBucketPartitionPages(const PartitionBucket* bucket) { |
(...skipping 35 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
556 } | 559 } |
557 } | 560 } |
558 | 561 |
559 static ALWAYS_INLINE char* partitionPageAllocAndFillFreelist( | 562 static ALWAYS_INLINE char* partitionPageAllocAndFillFreelist( |
560 PartitionPage* page) { | 563 PartitionPage* page) { |
561 ASSERT(page != &PartitionRootGeneric::gSeedPage); | 564 ASSERT(page != &PartitionRootGeneric::gSeedPage); |
562 uint16_t numSlots = page->numUnprovisionedSlots; | 565 uint16_t numSlots = page->numUnprovisionedSlots; |
563 ASSERT(numSlots); | 566 ASSERT(numSlots); |
564 PartitionBucket* bucket = page->bucket; | 567 PartitionBucket* bucket = page->bucket; |
565 // We should only get here when _every_ slot is either used or unprovisioned. | 568 // We should only get here when _every_ slot is either used or unprovisioned. |
566 // (The third state is "on the freelist". If we have a non-empty freelist, we
should not get here.) | 569 // (The third state is "on the freelist". If we have a non-empty freelist, we |
| 570 // should not get here.) |
567 ASSERT(numSlots + page->numAllocatedSlots == partitionBucketSlots(bucket)); | 571 ASSERT(numSlots + page->numAllocatedSlots == partitionBucketSlots(bucket)); |
568 // Similarly, make explicitly sure that the freelist is empty. | 572 // Similarly, make explicitly sure that the freelist is empty. |
569 ASSERT(!page->freelistHead); | 573 ASSERT(!page->freelistHead); |
570 ASSERT(page->numAllocatedSlots >= 0); | 574 ASSERT(page->numAllocatedSlots >= 0); |
571 | 575 |
572 size_t size = bucket->slotSize; | 576 size_t size = bucket->slotSize; |
573 char* base = reinterpret_cast<char*>(partitionPageToPointer(page)); | 577 char* base = reinterpret_cast<char*>(partitionPageToPointer(page)); |
574 char* returnObject = base + (size * page->numAllocatedSlots); | 578 char* returnObject = base + (size * page->numAllocatedSlots); |
575 char* firstFreelistPointer = returnObject + size; | 579 char* firstFreelistPointer = returnObject + size; |
576 char* firstFreelistPointerExtent = | 580 char* firstFreelistPointerExtent = |
(...skipping 14 matching lines...) Expand all Loading... |
591 // space, we may get an off-by-one when a freelist pointer fits in the | 595 // space, we may get an off-by-one when a freelist pointer fits in the |
592 // wasted space, but a slot does not. | 596 // wasted space, but a slot does not. |
593 // We know we can fit at least one freelist pointer. | 597 // We know we can fit at least one freelist pointer. |
594 numNewFreelistEntries = 1; | 598 numNewFreelistEntries = 1; |
595 // Any further entries require space for the whole slot span. | 599 // Any further entries require space for the whole slot span. |
596 numNewFreelistEntries += static_cast<uint16_t>( | 600 numNewFreelistEntries += static_cast<uint16_t>( |
597 (freelistLimit - firstFreelistPointerExtent) / size); | 601 (freelistLimit - firstFreelistPointerExtent) / size); |
598 } | 602 } |
599 | 603 |
600 // We always return an object slot -- that's the +1 below. | 604 // We always return an object slot -- that's the +1 below. |
601 // We do not necessarily create any new freelist entries, because we cross su
b page boundaries frequently for large bucket sizes. | 605 // We do not necessarily create any new freelist entries, because we cross |
| 606 // sub page boundaries frequently for large bucket sizes. |
602 ASSERT(numNewFreelistEntries + 1 <= numSlots); | 607 ASSERT(numNewFreelistEntries + 1 <= numSlots); |
603 numSlots -= (numNewFreelistEntries + 1); | 608 numSlots -= (numNewFreelistEntries + 1); |
604 page->numUnprovisionedSlots = numSlots; | 609 page->numUnprovisionedSlots = numSlots; |
605 page->numAllocatedSlots++; | 610 page->numAllocatedSlots++; |
606 | 611 |
607 if (LIKELY(numNewFreelistEntries)) { | 612 if (LIKELY(numNewFreelistEntries)) { |
608 char* freelistPointer = firstFreelistPointer; | 613 char* freelistPointer = firstFreelistPointer; |
609 PartitionFreelistEntry* entry = | 614 PartitionFreelistEntry* entry = |
610 reinterpret_cast<PartitionFreelistEntry*>(freelistPointer); | 615 reinterpret_cast<PartitionFreelistEntry*>(freelistPointer); |
611 page->freelistHead = entry; | 616 page->freelistHead = entry; |
(...skipping 866 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1478 partitionStats.totalDiscardableBytes += memoryStats[i].discardableBytes; | 1483 partitionStats.totalDiscardableBytes += memoryStats[i].discardableBytes; |
1479 if (!isLightDump) | 1484 if (!isLightDump) |
1480 partitionStatsDumper->partitionsDumpBucketStats(partitionName, | 1485 partitionStatsDumper->partitionsDumpBucketStats(partitionName, |
1481 &memoryStats[i]); | 1486 &memoryStats[i]); |
1482 } | 1487 } |
1483 } | 1488 } |
1484 partitionStatsDumper->partitionDumpTotals(partitionName, &partitionStats); | 1489 partitionStatsDumper->partitionDumpTotals(partitionName, &partitionStats); |
1485 } | 1490 } |
1486 | 1491 |
1487 } // namespace WTF | 1492 } // namespace WTF |
OLD | NEW |