Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 418 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 429 latestExtent->superPagesEnd = superPage + kSuperPageSize; | 429 latestExtent->superPagesEnd = superPage + kSuperPageSize; |
| 430 } else { | 430 } else { |
| 431 // We allocated next to an existing extent so just nudge the size up a little. | 431 // We allocated next to an existing extent so just nudge the size up a little. |
| 432 ASSERT(currentExtent->superPagesEnd); | 432 ASSERT(currentExtent->superPagesEnd); |
| 433 currentExtent->superPagesEnd += kSuperPageSize; | 433 currentExtent->superPagesEnd += kSuperPageSize; |
| 434 ASSERT(ret >= currentExtent->superPageBase && ret < currentExtent->superPagesEnd); | 434 ASSERT(ret >= currentExtent->superPageBase && ret < currentExtent->superPagesEnd); |
| 435 } | 435 } |
| 436 return ret; | 436 return ret; |
| 437 } | 437 } |
| 438 | 438 |
| 439 static ALWAYS_INLINE size_t partitionBucketBytes(const PartitionBucket* bucket) | |
| 440 { | |
| 441 return bucket->numSystemPagesPerSlotSpan * kSystemPageSize; | |
| 442 } | |
| 443 | |
| 444 static ALWAYS_INLINE uint16_t partitionBucketSlots(const PartitionBucket* bucket) | |
| 445 { | |
| 446 return static_cast<uint16_t>(partitionBucketBytes(bucket) / bucket->slotSize); | |
| 447 } | |
| 448 | |
| 449 static ALWAYS_INLINE uint16_t partitionBucketPartitionPages(const PartitionBucket* bucket) | 439 static ALWAYS_INLINE uint16_t partitionBucketPartitionPages(const PartitionBucket* bucket) |
| 450 { | 440 { |
| 451 return (bucket->numSystemPagesPerSlotSpan + (kNumSystemPagesPerPartitionPage - 1)) / kNumSystemPagesPerPartitionPage; | 441 return (bucket->numSystemPagesPerSlotSpan + (kNumSystemPagesPerPartitionPage - 1)) / kNumSystemPagesPerPartitionPage; |
| 452 } | 442 } |
| 453 | 443 |
| 454 static ALWAYS_INLINE void partitionPageReset(PartitionPage* page, PartitionBucket* bucket) | 444 static ALWAYS_INLINE void partitionPageReset(PartitionPage* page, PartitionBucket* bucket) |
| 455 { | 445 { |
| 456 ASSERT(page != &PartitionRootGeneric::gSeedPage); | 446 ASSERT(page != &PartitionRootGeneric::gSeedPage); |
| 457 ASSERT(page->bucket == bucket); | 447 ASSERT(page->bucket == bucket); |
| 458 ASSERT(!page->freelistHead); | 448 ASSERT(!page->freelistHead); |
| (...skipping 153 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 612 } | 602 } |
| 613 } | 603 } |
| 614 | 604 |
| 615 bucket->activePagesHead = 0; | 605 bucket->activePagesHead = 0; |
| 616 return false; | 606 return false; |
| 617 } | 607 } |
| 618 | 608 |
| 619 static ALWAYS_INLINE PartitionDirectMapExtent* partitionPageToDirectMapExtent(PartitionPage* page) | 609 static ALWAYS_INLINE PartitionDirectMapExtent* partitionPageToDirectMapExtent(PartitionPage* page) |
| 620 { | 610 { |
| 621 ASSERT(partitionBucketIsDirectMapped(page->bucket)); | 611 ASSERT(partitionBucketIsDirectMapped(page->bucket)); |
| 622 return reinterpret_cast<PartitionDirectMapExtent*>(reinterpret_cast<char*>(page) + 2 * kPageMetadataSize); | 612 return reinterpret_cast<PartitionDirectMapExtent*>(reinterpret_cast<char*>(page) + 3 * kPageMetadataSize); |
| 623 } | 613 } |
| 624 | 614 |
| 625 static ALWAYS_INLINE void* partitionDirectMap(PartitionRootBase* root, int flags, size_t size) | 615 static ALWAYS_INLINE void partitionPageSetRawSize(PartitionPage* page, size_t size) |
| 626 { | 616 { |
| 627 size = partitionDirectMapSize(size); | 617 size_t* rawSizePtr = partitionPageGetRawSizePtr(page); |
| 618 if (UNLIKELY(rawSizePtr != nullptr)) | |
| 619 *rawSizePtr = size; | |
| 620 } | |
| 621 | |
| 622 static ALWAYS_INLINE void* partitionDirectMap(PartitionRootBase* root, int flags, size_t rawSize) | |
| 623 { | |
| 624 size_t size = partitionDirectMapSize(rawSize); | |
| 628 | 625 |
| 629 // Because we need to fake looking like a super page, we need to allocate | 626 // Because we need to fake looking like a super page, we need to allocate |
| 630 // a bunch of system pages more than "size": | 627 // a bunch of system pages more than "size": |
| 631 // - The first few system pages are the partition page in which the super | 628 // - The first few system pages are the partition page in which the super |
| 632 // page metadata is stored. We fault just one system page out of a partition | 629 // page metadata is stored. We fault just one system page out of a partition |
| 633 // page sized clump. | 630 // page sized clump. |
| 634 // - We add a trailing guard page on 32-bit (on 64-bit we rely on the | 631 // - We add a trailing guard page on 32-bit (on 64-bit we rely on the |
| 635 // massive address space plus randomization instead). | 632 // massive address space plus randomization instead). |
| 636 size_t mapSize = size + kPartitionPageSize; | 633 size_t mapSize = size + kPartitionPageSize; |
| 637 #if !CPU(64BIT) | 634 #if !CPU(64BIT) |
| (...skipping 22 matching lines...) Expand all Loading... | |
| 660 | 657 |
| 661 PartitionSuperPageExtentEntry* extent = reinterpret_cast<PartitionSuperPageExtentEntry*>(partitionSuperPageToMetadataArea(ptr)); | 658 PartitionSuperPageExtentEntry* extent = reinterpret_cast<PartitionSuperPageExtentEntry*>(partitionSuperPageToMetadataArea(ptr)); |
| 662 extent->root = root; | 659 extent->root = root; |
| 663 // Most new extents will be part of a larger extent, and these three fields | 660 // Most new extents will be part of a larger extent, and these three fields |
| 664 // are unused, but we initialize them to 0 so that we get a clear signal | 661 // are unused, but we initialize them to 0 so that we get a clear signal |
| 665 // in case they are accidentally used. | 662 // in case they are accidentally used. |
| 666 extent->superPageBase = 0; | 663 extent->superPageBase = 0; |
| 667 extent->superPagesEnd = 0; | 664 extent->superPagesEnd = 0; |
| 668 extent->next = 0; | 665 extent->next = 0; |
| 669 PartitionPage* page = partitionPointerToPageNoAlignmentCheck(ret); | 666 PartitionPage* page = partitionPointerToPageNoAlignmentCheck(ret); |
| 670 PartitionBucket* bucket = reinterpret_cast<PartitionBucket*>(reinterpret_cast<char*>(page) + kPageMetadataSize); | 667 PartitionBucket* bucket = reinterpret_cast<PartitionBucket*>(reinterpret_cast<char*>(page) + (kPageMetadataSize * 2)); |
| 671 page->freelistHead = 0; | 668 page->freelistHead = 0; |
| 672 page->nextPage = 0; | 669 page->nextPage = 0; |
| 673 page->bucket = bucket; | 670 page->bucket = bucket; |
| 674 page->numAllocatedSlots = 1; | 671 page->numAllocatedSlots = 1; |
| 675 page->numUnprovisionedSlots = 0; | 672 page->numUnprovisionedSlots = 0; |
| 676 page->pageOffset = 0; | 673 page->pageOffset = 0; |
| 677 page->emptyCacheIndex = 0; | 674 page->emptyCacheIndex = 0; |
| 678 | 675 |
| 679 bucket->activePagesHead = 0; | 676 bucket->activePagesHead = 0; |
| 680 bucket->emptyPagesHead = 0; | 677 bucket->emptyPagesHead = 0; |
| 681 bucket->slotSize = size; | 678 bucket->slotSize = size; |
| 682 bucket->numSystemPagesPerSlotSpan = 0; | 679 bucket->numSystemPagesPerSlotSpan = 0; |
| 683 bucket->numFullPages = 0; | 680 bucket->numFullPages = 0; |
| 684 | 681 |
| 682 partitionPageSetRawSize(page, rawSize); | |
| 683 ASSERT(partitionPageGetRawSize(page) == rawSize); | |
| 684 | |
| 685 PartitionDirectMapExtent* mapExtent = partitionPageToDirectMapExtent(page); | 685 PartitionDirectMapExtent* mapExtent = partitionPageToDirectMapExtent(page); |
| 686 mapExtent->mapSize = mapSize - kPartitionPageSize - kSystemPageSize; | 686 mapExtent->mapSize = mapSize - kPartitionPageSize - kSystemPageSize; |
| 687 mapExtent->bucket = bucket; | 687 mapExtent->bucket = bucket; |
| 688 | 688 |
| 689 // Maintain the doubly-linked list of all direct mappings. | 689 // Maintain the doubly-linked list of all direct mappings. |
| 690 mapExtent->nextExtent = root->directMapList; | 690 mapExtent->nextExtent = root->directMapList; |
| 691 if (mapExtent->nextExtent) | 691 if (mapExtent->nextExtent) |
| 692 mapExtent->nextExtent->prevExtent = mapExtent; | 692 mapExtent->nextExtent->prevExtent = mapExtent; |
| 693 mapExtent->prevExtent = nullptr; | 693 mapExtent->prevExtent = nullptr; |
| 694 root->directMapList = mapExtent; | 694 root->directMapList = mapExtent; |
| (...skipping 31 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 726 ASSERT(!(unmapSize & kPageAllocationGranularityOffsetMask)); | 726 ASSERT(!(unmapSize & kPageAllocationGranularityOffsetMask)); |
| 727 | 727 |
| 728 char* ptr = reinterpret_cast<char*>(partitionPageToPointer(page)); | 728 char* ptr = reinterpret_cast<char*>(partitionPageToPointer(page)); |
| 729 // Account for the mapping starting a partition page before the actual | 729 // Account for the mapping starting a partition page before the actual |
| 730 // allocation address. | 730 // allocation address. |
| 731 ptr -= kPartitionPageSize; | 731 ptr -= kPartitionPageSize; |
| 732 | 732 |
| 733 freePages(ptr, unmapSize); | 733 freePages(ptr, unmapSize); |
| 734 } | 734 } |
| 735 | 735 |
| 736 static ALWAYS_INLINE size_t* partitionPageGetRawSizePtr(PartitionPage* page) | |
| 737 { | |
| 738 // For single-slot buckets which span more than one partition page, we | |
| 739 // have some spare metadata space to store the raw allocation size. We | |
| 740 // can use this to report better statistics. | |
| 741 PartitionBucket* bucket = page->bucket; | |
| 742 if (bucket->slotSize <= kMaxSystemPagesPerSlotSpan * kSystemPageSize) | |
| 743 return nullptr; | |
| 744 | |
| 745 ASSERT(partitionBucketSlots(bucket) == 1); | |
| 746 page++; | |
| 747 return reinterpret_cast<size_t*>(&page->freelistHead); | |
| 748 } | |
| 749 | |
| 750 static ALWAYS_INLINE void partitionPageSetRawSize(PartitionPage* page, size_t si ze) | |
| 751 { | |
| 752 size_t* rawSizePtr = partitionPageGetRawSizePtr(page); | |
| 753 if (UNLIKELY(rawSizePtr != nullptr)) | |
| 754 *rawSizePtr = size; | |
| 755 } | |
| 756 | |
| 757 static size_t partitionPageGetRawSize(PartitionPage* page) | |
| 758 { | |
| 759 size_t* rawSizePtr = partitionPageGetRawSizePtr(page); | |
| 760 if (UNLIKELY(rawSizePtr != nullptr)) | |
| 761 return *rawSizePtr; | |
| 762 return 0; | |
| 763 } | |
| 764 | |
| 765 void* partitionAllocSlowPath(PartitionRootBase* root, int flags, size_t size, PartitionBucket* bucket) | 736 void* partitionAllocSlowPath(PartitionRootBase* root, int flags, size_t size, PartitionBucket* bucket) |
| 766 { | 737 { |
| 767 // The slow path is called when the freelist is empty. | 738 // The slow path is called when the freelist is empty. |
| 768 ASSERT(!bucket->activePagesHead->freelistHead); | 739 ASSERT(!bucket->activePagesHead->freelistHead); |
| 769 | 740 |
| 770 PartitionPage* newPage = nullptr; | 741 PartitionPage* newPage = nullptr; |
| 771 | 742 |
| 772 // For the partitionAllocGeneric API, we have a bunch of buckets marked | 743 // For the partitionAllocGeneric API, we have a bunch of buckets marked |
| 773 // as special cases. We bounce them through to the slow path so that we | 744 // as special cases. We bounce them through to the slow path so that we |
| 774 // can still have a blazing fast hot path due to lack of corner-case | 745 // can still have a blazing fast hot path due to lack of corner-case |
| (...skipping 148 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 923 void partitionFreeSlowPath(PartitionPage* page) | 894 void partitionFreeSlowPath(PartitionPage* page) |
| 924 { | 895 { |
| 925 PartitionBucket* bucket = page->bucket; | 896 PartitionBucket* bucket = page->bucket; |
| 926 ASSERT(page != &PartitionRootGeneric::gSeedPage); | 897 ASSERT(page != &PartitionRootGeneric::gSeedPage); |
| 927 if (LIKELY(page->numAllocatedSlots == 0)) { | 898 if (LIKELY(page->numAllocatedSlots == 0)) { |
| 928 // Page became fully unused. | 899 // Page became fully unused. |
| 929 if (UNLIKELY(partitionBucketIsDirectMapped(bucket))) { | 900 if (UNLIKELY(partitionBucketIsDirectMapped(bucket))) { |
| 930 partitionDirectUnmap(page); | 901 partitionDirectUnmap(page); |
| 931 return; | 902 return; |
| 932 } | 903 } |
| 933 // If it's the current active page, attempt to change it. We'd prefer to leave | 904 // Make sure all large allocations always bounce through the slow path, |
| 934 // the page empty as a gentle force towards defragmentation. | 905 // so that we correctly update size metadata. |
| 906 // TODO(cevans): remove this special case when we start bouncing empty | |
| 907 // pages straight to the empty list. This will happen soon. | |
| 908 if (UNLIKELY(partitionPageGetRawSize(page))) { | |
| 909 // We can make that allocations from this empty page bounce | |
| 910 // through the slow path by marking the freelist head as null. To | |
| 911 // get the single slot filled correctly, we also have to tag the | |
| 912 // page as having a single unprovisioned slot. | |
|
haraken
2015/06/20 00:03:08
Thanks, now I understand :)
| |
| 913 ASSERT(partitionBucketSlots(bucket) == 1); | |
| 914 page->numUnprovisionedSlots = 1; | |
| 915 page->freelistHead = nullptr; | |
| 916 } | |
| 917 // If it's the current active page, attempt to change it. We'd prefer to | |
| 918 // leave the page empty as a gentle force towards defragmentation. | |
| 935 if (LIKELY(page == bucket->activePagesHead) && page->nextPage) { | 919 if (LIKELY(page == bucket->activePagesHead) && page->nextPage) { |
| 936 if (partitionSetNewActivePage(page->nextPage)) { | 920 if (partitionSetNewActivePage(page->nextPage)) { |
| 937 ASSERT(bucket->activePagesHead != page); | 921 ASSERT(bucket->activePagesHead != page); |
| 938 // Link the empty page back in after the new current page, to | 922 // Link the empty page back in after the new current page, to |
| 939 // avoid losing a reference to it. | 923 // avoid losing a reference to it. |
| 940 // TODO: consider walking the list to link the empty page after | 924 // TODO: consider walking the list to link the empty page after |
| 941 // all non-empty pages? | 925 // all non-empty pages? |
| 942 PartitionPage* currentPage = bucket->activePagesHead; | 926 PartitionPage* currentPage = bucket->activePagesHead; |
| 943 page->nextPage = currentPage->nextPage; | 927 page->nextPage = currentPage->nextPage; |
| 944 currentPage->nextPage = page; | 928 currentPage->nextPage = page; |
| (...skipping 23 matching lines...) Expand all Loading... | |
| 968 page->nextPage = bucket->activePagesHead; | 952 page->nextPage = bucket->activePagesHead; |
| 969 bucket->activePagesHead = page; | 953 bucket->activePagesHead = page; |
| 970 --bucket->numFullPages; | 954 --bucket->numFullPages; |
| 971 // Special case: for a partition page with just a single slot, it may | 955 // Special case: for a partition page with just a single slot, it may |
| 972 // now be empty and we want to run it through the empty logic. | 956 // now be empty and we want to run it through the empty logic. |
| 973 if (UNLIKELY(page->numAllocatedSlots == 0)) | 957 if (UNLIKELY(page->numAllocatedSlots == 0)) |
| 974 partitionFreeSlowPath(page); | 958 partitionFreeSlowPath(page); |
| 975 } | 959 } |
| 976 } | 960 } |
| 977 | 961 |
| 978 bool partitionReallocDirectMappedInPlace(PartitionRootGeneric* root, PartitionPage* page, size_t newSize) | 962 bool partitionReallocDirectMappedInPlace(PartitionRootGeneric* root, PartitionPage* page, size_t rawSize) |
| 979 { | 963 { |
| 980 ASSERT(partitionBucketIsDirectMapped(page->bucket)); | 964 ASSERT(partitionBucketIsDirectMapped(page->bucket)); |
| 981 | 965 |
| 982 newSize = partitionCookieSizeAdjustAdd(newSize); | 966 rawSize = partitionCookieSizeAdjustAdd(rawSize); |
| 983 | 967 |
| 984 // Note that the new size might be a bucketed size; this function is called | 968 // Note that the new size might be a bucketed size; this function is called |
| 985 // whenever we're reallocating a direct mapped allocation. | 969 // whenever we're reallocating a direct mapped allocation. |
| 986 newSize = partitionDirectMapSize(newSize); | 970 size_t newSize = partitionDirectMapSize(rawSize); |
| 987 if (newSize < kGenericMinDirectMappedDownsize) | 971 if (newSize < kGenericMinDirectMappedDownsize) |
| 988 return false; | 972 return false; |
| 989 | 973 |
| 990 // bucket->slotSize is the current size of the allocation. | 974 // bucket->slotSize is the current size of the allocation. |
| 991 size_t currentSize = page->bucket->slotSize; | 975 size_t currentSize = page->bucket->slotSize; |
| 992 if (newSize == currentSize) | 976 if (newSize == currentSize) |
| 993 return true; | 977 return true; |
| 994 | 978 |
| 995 char* charPtr = static_cast<char*>(partitionPageToPointer(page)); | 979 char* charPtr = static_cast<char*>(partitionPageToPointer(page)); |
| 996 | 980 |
| (...skipping 21 matching lines...) Expand all Loading... | |
| 1018 memset(charPtr + currentSize, kUninitializedByte, recommitSize); | 1002 memset(charPtr + currentSize, kUninitializedByte, recommitSize); |
| 1019 #endif | 1003 #endif |
| 1020 } else { | 1004 } else { |
| 1021 // We can't perform the realloc in-place. | 1005 // We can't perform the realloc in-place. |
| 1022 // TODO: support this too when possible. | 1006 // TODO: support this too when possible. |
| 1023 return false; | 1007 return false; |
| 1024 } | 1008 } |
| 1025 | 1009 |
| 1026 #if ENABLE(ASSERT) | 1010 #if ENABLE(ASSERT) |
| 1027 // Write a new trailing cookie. | 1011 // Write a new trailing cookie. |
| 1028 partitionCookieWriteValue(charPtr + newSize - kCookieSize); | 1012 partitionCookieWriteValue(charPtr + rawSize - kCookieSize); |
| 1029 #endif | 1013 #endif |
| 1030 | 1014 |
| 1015 partitionPageSetRawSize(page, rawSize); | |
| 1016 ASSERT(partitionPageGetRawSize(page) == rawSize); | |
| 1017 | |
| 1031 page->bucket->slotSize = newSize; | 1018 page->bucket->slotSize = newSize; |
| 1032 return true; | 1019 return true; |
| 1033 } | 1020 } |
| 1034 | 1021 |
| 1035 void* partitionReallocGeneric(PartitionRootGeneric* root, void* ptr, size_t newSize) | 1022 void* partitionReallocGeneric(PartitionRootGeneric* root, void* ptr, size_t newSize) |
| 1036 { | 1023 { |
| 1037 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) | 1024 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) |
| 1038 return realloc(ptr, newSize); | 1025 return realloc(ptr, newSize); |
| 1039 #else | 1026 #else |
| 1040 if (UNLIKELY(!ptr)) | 1027 if (UNLIKELY(!ptr)) |
| (...skipping 173 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 1214 // partitionsDumpBucketStats is called after collecting stats because it | 1201 // partitionsDumpBucketStats is called after collecting stats because it |
| 1215 // can use PartitionAlloc to allocate and this can affect the statistics. | 1202 // can use PartitionAlloc to allocate and this can affect the statistics. |
| 1216 for (size_t i = 0; i < partitionNumBuckets; ++i) { | 1203 for (size_t i = 0; i < partitionNumBuckets; ++i) { |
| 1217 if (memoryStats[i].isValid) | 1204 if (memoryStats[i].isValid) |
| 1218 partitionStatsDumper->partitionsDumpBucketStats(partitionName, &memoryStats[i]); | 1205 partitionStatsDumper->partitionsDumpBucketStats(partitionName, &memoryStats[i]); |
| 1219 } | 1206 } |
| 1220 } | 1207 } |
| 1221 | 1208 |
| 1222 } // namespace WTF | 1209 } // namespace WTF |
| 1223 | 1210 |
| OLD | NEW |