Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 459 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 470 PartitionPage* secondaryPage = reinterpret_cast<PartitionPage*>(pageChar Ptr); | 470 PartitionPage* secondaryPage = reinterpret_cast<PartitionPage*>(pageChar Ptr); |
| 471 secondaryPage->pageOffset = i; | 471 secondaryPage->pageOffset = i; |
| 472 } | 472 } |
| 473 } | 473 } |
| 474 | 474 |
| 475 static ALWAYS_INLINE size_t partitionRoundUpToSystemPage(size_t size) | 475 static ALWAYS_INLINE size_t partitionRoundUpToSystemPage(size_t size) |
| 476 { | 476 { |
| 477 return (size + kSystemPageOffsetMask) & kSystemPageBaseMask; | 477 return (size + kSystemPageOffsetMask) & kSystemPageBaseMask; |
| 478 } | 478 } |
| 479 | 479 |
| 480 static ALWAYS_INLINE size_t partitionRoundDownToSystemPage(size_t size) | |
| 481 { | |
| 482 return size & kSystemPageBaseMask; | |
| 483 } | |
| 484 | |
| 480 static ALWAYS_INLINE char* partitionPageAllocAndFillFreelist(PartitionPage* page ) | 485 static ALWAYS_INLINE char* partitionPageAllocAndFillFreelist(PartitionPage* page ) |
| 481 { | 486 { |
| 482 ASSERT(page != &PartitionRootGeneric::gSeedPage); | 487 ASSERT(page != &PartitionRootGeneric::gSeedPage); |
| 483 uint16_t numSlots = page->numUnprovisionedSlots; | 488 uint16_t numSlots = page->numUnprovisionedSlots; |
| 484 ASSERT(numSlots); | 489 ASSERT(numSlots); |
| 485 PartitionBucket* bucket = page->bucket; | 490 PartitionBucket* bucket = page->bucket; |
| 486 // We should only get here when _every_ slot is either used or unprovisioned . | 491 // We should only get here when _every_ slot is either used or unprovisioned . |
| 487 // (The third state is "on the freelist". If we have a non-empty freelist, w e should not get here.) | 492 // (The third state is "on the freelist". If we have a non-empty freelist, w e should not get here.) |
| 488 ASSERT(numSlots + page->numAllocatedSlots == partitionBucketSlots(bucket)); | 493 ASSERT(numSlots + page->numAllocatedSlots == partitionBucketSlots(bucket)); |
| 489 // Similarly, make explicitly sure that the freelist is empty. | 494 // Similarly, make explicitly sure that the freelist is empty. |
| (...skipping 371 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 861 static void partitionDecommitEmptyPages(PartitionRootBase* root) | 866 static void partitionDecommitEmptyPages(PartitionRootBase* root) |
| 862 { | 867 { |
| 863 for (size_t i = 0; i < kMaxFreeableSpans; ++i) { | 868 for (size_t i = 0; i < kMaxFreeableSpans; ++i) { |
| 864 PartitionPage* page = root->globalEmptyPageRing[i]; | 869 PartitionPage* page = root->globalEmptyPageRing[i]; |
| 865 if (page) | 870 if (page) |
| 866 partitionDecommitPageIfPossible(root, page); | 871 partitionDecommitPageIfPossible(root, page); |
| 867 root->globalEmptyPageRing[i] = nullptr; | 872 root->globalEmptyPageRing[i] = nullptr; |
| 868 } | 873 } |
| 869 } | 874 } |
| 870 | 875 |
| 871 void partitionPurgeMemory(PartitionRoot* root, int flags) | |
| 872 { | |
| 873 if (flags & PartitionPurgeDecommitEmptyPages) | |
| 874 partitionDecommitEmptyPages(root); | |
| 875 } | |
| 876 | |
| 877 void partitionPurgeMemoryGeneric(PartitionRootGeneric* root, int flags) | |
| 878 { | |
| 879 spinLockLock(&root->lock); | |
| 880 if (flags & PartitionPurgeDecommitEmptyPages) | |
| 881 partitionDecommitEmptyPages(root); | |
| 882 spinLockUnlock(&root->lock); | |
| 883 } | |
| 884 | |
| 885 void partitionFreeSlowPath(PartitionPage* page) | 876 void partitionFreeSlowPath(PartitionPage* page) |
| 886 { | 877 { |
| 887 PartitionBucket* bucket = page->bucket; | 878 PartitionBucket* bucket = page->bucket; |
| 888 ASSERT(page != &PartitionRootGeneric::gSeedPage); | 879 ASSERT(page != &PartitionRootGeneric::gSeedPage); |
| 889 if (LIKELY(page->numAllocatedSlots == 0)) { | 880 if (LIKELY(page->numAllocatedSlots == 0)) { |
| 890 // Page became fully unused. | 881 // Page became fully unused. |
| 891 if (UNLIKELY(partitionBucketIsDirectMapped(bucket))) { | 882 if (UNLIKELY(partitionBucketIsDirectMapped(bucket))) { |
| 892 partitionDirectUnmap(page); | 883 partitionDirectUnmap(page); |
| 893 return; | 884 return; |
| 894 } | 885 } |
| (...skipping 160 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 1055 size_t copySize = actualOldSize; | 1046 size_t copySize = actualOldSize; |
| 1056 if (newSize < copySize) | 1047 if (newSize < copySize) |
| 1057 copySize = newSize; | 1048 copySize = newSize; |
| 1058 | 1049 |
| 1059 memcpy(ret, ptr, copySize); | 1050 memcpy(ret, ptr, copySize); |
| 1060 partitionFreeGeneric(root, ptr); | 1051 partitionFreeGeneric(root, ptr); |
| 1061 return ret; | 1052 return ret; |
| 1062 #endif | 1053 #endif |
| 1063 } | 1054 } |
| 1064 | 1055 |
| 1056 void partitionPurgeMemory(PartitionRoot* root, int flags) | |
|
haraken
2015/06/22 09:22:00
I'd move this to just before partitionPurgeMemoryGeneric.
| |
| 1057 { | |
| 1058 if (flags & PartitionPurgeDecommitEmptyPages) | |
| 1059 partitionDecommitEmptyPages(root); | |
| 1060 // We don't currently do anything for PartitionPurgeDiscardUnusedSystemPages | |
| 1061 // here because that flag is only useful for allocations >= system page | |
| 1062 // size. We only have allocations that large inside generic partitions | |
| 1063 // at the moment. | |
| 1064 } | |
| 1065 | |
| 1066 static size_t partitionPurgePage(const PartitionPage* page, bool discard) | |
| 1067 { | |
| 1068 const PartitionBucket* bucket = page->bucket; | |
| 1069 if (bucket->slotSize < kSystemPageSize || !page->numAllocatedSlots) | |
| 1070 return 0; | |
| 1071 | |
| 1072 size_t bucketNumSlots = partitionBucketSlots(bucket); | |
| 1073 size_t discardableBytes = 0; | |
| 1074 | |
| 1075 size_t rawSize = partitionPageGetRawSize(const_cast<PartitionPage*>(page)); | |
| 1076 if (rawSize) { | |
| 1077 uint32_t usedBytes = static_cast<uint32_t>(partitionRoundUpToSystemPage( rawSize)); | |
| 1078 discardableBytes = bucket->slotSize - usedBytes; | |
|
haraken
2015/06/22 09:22:00
Is it guaranteed that bucket->slotSize is a multiple of the system page size?
| |
| 1079 if (discardableBytes && discard) { | |
| 1080 char* ptr = reinterpret_cast<char*>(partitionPageToPointer(page)); | |
| 1081 ptr += usedBytes; | |
| 1082 discardSystemPages(ptr, discardableBytes); | |
|
haraken
2015/06/22 09:22:00
For a single-slot allocation, we're committing the
| |
| 1083 } | |
| 1084 return discardableBytes; | |
| 1085 } | |
| 1086 | |
| 1087 char slotUsage[(kPartitionPageSize * kMaxPartitionPagesPerSlotSpan) / kSyste mPageSize]; | |
|
haraken
2015/06/22 09:22:00
size_t maxSlotCount = (kPartitionPageSize * kMaxPartitionPagesPerSlotSpan) / kSystemPageSize;
| |
| 1088 size_t lastSlot = -1; | |
| 1089 memset(slotUsage, 1, sizeof(slotUsage)); | |
| 1090 char* ptr = reinterpret_cast<char*>(partitionPageToPointer(page)); | |
| 1091 PartitionFreelistEntry* fl = page->freelistHead; | |
|
haraken
2015/06/22 09:22:00
fl => entry or freelist
| |
| 1092 // First, walk the freelist for this page and make a bitmap of which slots | |
| 1093 // are not in use. | |
| 1094 while (fl) { | |
| 1095 size_t slotIndex = (reinterpret_cast<char*>(fl) - ptr) / bucket->slotSiz e; | |
| 1096 ASSERT(slotIndex < bucketNumSlots); | |
| 1097 slotUsage[slotIndex] = 0; | |
| 1098 fl = partitionFreelistMask(fl->next); | |
| 1099 if (!fl && !partitionFreelistMask(fl)) | |
|
haraken
2015/06/22 09:22:00
Is it possible that this condition becomes true? I
| |
| 1100 lastSlot = slotIndex; | |
| 1101 } | |
| 1102 // Next, walk the slots and for any not in use, consider where the system | |
| 1103 // page boundaries occur. We can release any system pages back to the | |
| 1104 // system as long as we don't interfere with a freelist pointer or an | |
| 1105 // adjacent slot. | |
| 1106 // TODO(cevans): I think we can "truncate" the page, i.e. increase the | |
| 1107 // value of page->numUnprovisionedSlots and rewrite(!) the freelist, if | |
| 1108 // we find that to be a win too. | |
| 1109 for (size_t i = 0; i < bucketNumSlots; ++i) { | |
| 1110 if (slotUsage[i]) | |
| 1111 continue; | |
| 1112 // The first address we can safely discard is just after the freelist | |
| 1113 // pointer. There's one quirk: if the freelist pointer is actually a | |
| 1114 // null, we can discard that pointer value too. | |
| 1115 char* beginPtr = ptr + (i * bucket->slotSize); | |
| 1116 char* endPtr = beginPtr + bucket->slotSize; | |
| 1117 if (i != lastSlot) | |
| 1118 beginPtr += sizeof(PartitionFreelistEntry); | |
| 1119 beginPtr = reinterpret_cast<char*>(partitionRoundUpToSystemPage(reinterp ret_cast<size_t>(beginPtr))); | |
| 1120 endPtr = reinterpret_cast<char*>(partitionRoundDownToSystemPage(reinterp ret_cast<size_t>(endPtr))); | |
| 1121 if (beginPtr < endPtr) { | |
| 1122 size_t partialSlotBytes = endPtr - beginPtr; | |
| 1123 discardableBytes += partialSlotBytes; | |
| 1124 if (discard) | |
| 1125 discardSystemPages(beginPtr, partialSlotBytes); | |
| 1126 } | |
| 1127 } | |
| 1128 return discardableBytes; | |
| 1129 } | |
| 1130 | |
| 1131 static void partitionPurgeBucket(const PartitionBucket* bucket) | |
| 1132 { | |
| 1133 if (bucket->activePagesHead != &PartitionRootGeneric::gSeedPage) { | |
| 1134 for (const PartitionPage* page = bucket->activePagesHead; page; page = p age->nextPage) { | |
| 1135 ASSERT(page != &PartitionRootGeneric::gSeedPage); | |
| 1136 (void) partitionPurgePage(page, true); | |
|
haraken
2015/06/22 09:22:00
Nit: (void) won't be needed.
| |
| 1137 } | |
| 1138 } | |
| 1139 } | |
| 1140 | |
| 1141 void partitionPurgeMemoryGeneric(PartitionRootGeneric* root, int flags) | |
| 1142 { | |
| 1143 spinLockLock(&root->lock); | |
| 1144 if (flags & PartitionPurgeDecommitEmptyPages) | |
| 1145 partitionDecommitEmptyPages(root); | |
| 1146 if (flags & PartitionPurgeDiscardUnusedSystemPages) { | |
| 1147 for (size_t i = 0; i < kGenericNumBuckets; ++i) { | |
| 1148 const PartitionBucket* bucket = &root->buckets[i]; | |
| 1149 if (bucket->slotSize >= kSystemPageSize) | |
| 1150 partitionPurgeBucket(bucket); | |
| 1151 } | |
| 1152 } | |
| 1153 spinLockUnlock(&root->lock); | |
| 1154 } | |
| 1155 | |
| 1065 static void partitionDumpPageStats(PartitionBucketMemoryStats* statsOut, const P artitionPage* page) | 1156 static void partitionDumpPageStats(PartitionBucketMemoryStats* statsOut, const P artitionPage* page) |
| 1066 { | 1157 { |
| 1067 uint16_t bucketNumSlots = partitionBucketSlots(page->bucket); | 1158 uint16_t bucketNumSlots = partitionBucketSlots(page->bucket); |
| 1068 | 1159 |
| 1069 if (!page->freelistHead && page->numAllocatedSlots == 0) { | 1160 if (!page->freelistHead && page->numAllocatedSlots == 0) { |
| 1070 ASSERT(!page->numUnprovisionedSlots); | 1161 ASSERT(!page->numUnprovisionedSlots); |
| 1071 ++statsOut->numDecommittedPages; | 1162 ++statsOut->numDecommittedPages; |
| 1163 return; | |
| 1164 } | |
| 1165 | |
| 1166 statsOut->discardableBytes += partitionPurgePage(page, false); | |
| 1167 | |
| 1168 size_t pageBytesResident = partitionRoundUpToSystemPage((bucketNumSlots - pa ge->numUnprovisionedSlots) * statsOut->bucketSlotSize); | |
| 1169 | |
| 1170 size_t rawSize = partitionPageGetRawSize(const_cast<PartitionPage*>(page)); | |
| 1171 if (rawSize) { | |
| 1172 uint32_t activeBytes = static_cast<uint32_t>(partitionRoundUpToSystemPag e(rawSize)); | |
| 1173 statsOut->activeBytes += activeBytes; | |
| 1072 } else { | 1174 } else { |
| 1073 size_t rawSize = partitionPageGetRawSize(const_cast<PartitionPage*>(page )); | 1175 statsOut->activeBytes += (page->numAllocatedSlots * statsOut->bucketSlot Size); |
| 1074 if (rawSize) | 1176 } |
| 1075 statsOut->activeBytes += static_cast<uint32_t>(partitionRoundUpToSys temPage(rawSize)); | 1177 statsOut->residentBytes += pageBytesResident; |
| 1076 else | 1178 if (!page->numAllocatedSlots) { |
| 1077 statsOut->activeBytes += (page->numAllocatedSlots * statsOut->bucket SlotSize); | 1179 statsOut->decommittableBytes += pageBytesResident; |
| 1078 size_t pageBytesResident = (bucketNumSlots - page->numUnprovisionedSlots ) * statsOut->bucketSlotSize; | 1180 ++statsOut->numEmptyPages; |
| 1079 // Round up to system page size. | 1181 } else if (page->numAllocatedSlots == bucketNumSlots) { |
| 1080 size_t pageBytesResidentRounded = partitionRoundUpToSystemPage(pageBytes Resident); | 1182 ++statsOut->numFullPages; |
| 1081 statsOut->residentBytes += pageBytesResidentRounded; | 1183 } else { |
| 1082 if (!page->numAllocatedSlots) { | 1184 ++statsOut->numActivePages; |
| 1083 statsOut->decommittableBytes += pageBytesResidentRounded; | |
| 1084 ++statsOut->numEmptyPages; | |
| 1085 } else if (page->numAllocatedSlots == bucketNumSlots) { | |
| 1086 ++statsOut->numFullPages; | |
| 1087 } else { | |
| 1088 ++statsOut->numActivePages; | |
| 1089 } | |
| 1090 } | 1185 } |
| 1091 } | 1186 } |
| 1092 | 1187 |
| 1093 static void partitionDumpBucketStats(PartitionBucketMemoryStats* statsOut, const PartitionBucket* bucket) | 1188 static void partitionDumpBucketStats(PartitionBucketMemoryStats* statsOut, const PartitionBucket* bucket) |
| 1094 { | 1189 { |
| 1095 ASSERT(!partitionBucketIsDirectMapped(bucket)); | 1190 ASSERT(!partitionBucketIsDirectMapped(bucket)); |
| 1096 statsOut->isValid = false; | 1191 statsOut->isValid = false; |
| 1097 // If the active page list is empty (== &PartitionRootGeneric::gSeedPage), | 1192 // If the active page list is empty (== &PartitionRootGeneric::gSeedPage), |
| 1098 // the bucket might still need to be reported if it has an empty page list, | 1193 // the bucket might still need to be reported if it has an empty page list, |
| 1099 // or full pages. | 1194 // or full pages. |
| (...skipping 25 matching lines...) Expand all Loading... | |
| 1125 for (const PartitionPage* page = bucket->activePagesHead; page; page = p age->nextPage) { | 1220 for (const PartitionPage* page = bucket->activePagesHead; page; page = p age->nextPage) { |
| 1126 ASSERT(page != &PartitionRootGeneric::gSeedPage); | 1221 ASSERT(page != &PartitionRootGeneric::gSeedPage); |
| 1127 partitionDumpPageStats(statsOut, page); | 1222 partitionDumpPageStats(statsOut, page); |
| 1128 } | 1223 } |
| 1129 } | 1224 } |
| 1130 } | 1225 } |
| 1131 | 1226 |
| 1132 void partitionDumpStatsGeneric(PartitionRootGeneric* partition, const char* part itionName, PartitionStatsDumper* partitionStatsDumper) | 1227 void partitionDumpStatsGeneric(PartitionRootGeneric* partition, const char* part itionName, PartitionStatsDumper* partitionStatsDumper) |
| 1133 { | 1228 { |
| 1134 PartitionBucketMemoryStats bucketStats[kGenericNumBuckets]; | 1229 PartitionBucketMemoryStats bucketStats[kGenericNumBuckets]; |
| 1230 static const size_t kMaxReportableDirectMaps = 4096; | |
| 1231 uint32_t directMapLengths[kMaxReportableDirectMaps]; | |
| 1232 size_t numDirectMappedAllocations = 0; | |
| 1233 | |
| 1135 spinLockLock(&partition->lock); | 1234 spinLockLock(&partition->lock); |
| 1235 | |
| 1136 for (size_t i = 0; i < kGenericNumBuckets; ++i) { | 1236 for (size_t i = 0; i < kGenericNumBuckets; ++i) { |
| 1137 const PartitionBucket* bucket = &partition->buckets[i]; | 1237 const PartitionBucket* bucket = &partition->buckets[i]; |
| 1138 // Don't report the pseudo buckets that the generic allocator sets up in | 1238 // Don't report the pseudo buckets that the generic allocator sets up in |
| 1139 // order to preserve a fast size->bucket map (see | 1239 // order to preserve a fast size->bucket map (see |
| 1140 // partitionAllocGenericInit for details). | 1240 // partitionAllocGenericInit for details). |
| 1141 if (!bucket->activePagesHead) | 1241 if (!bucket->activePagesHead) |
| 1142 bucketStats[i].isValid = false; | 1242 bucketStats[i].isValid = false; |
| 1143 else | 1243 else |
| 1144 partitionDumpBucketStats(&bucketStats[i], bucket); | 1244 partitionDumpBucketStats(&bucketStats[i], bucket); |
| 1145 } | 1245 } |
| 1146 | 1246 |
| 1147 static const size_t kMaxReportableDirectMaps = 4096; | |
| 1148 uint32_t directMapLengths[kMaxReportableDirectMaps]; | |
| 1149 size_t numDirectMappedAllocations = 0; | |
| 1150 for (PartitionDirectMapExtent* extent = partition->directMapList; extent; ex tent = extent->nextExtent) { | 1247 for (PartitionDirectMapExtent* extent = partition->directMapList; extent; ex tent = extent->nextExtent) { |
| 1151 ASSERT(!extent->nextExtent || extent->nextExtent->prevExtent == extent); | 1248 ASSERT(!extent->nextExtent || extent->nextExtent->prevExtent == extent); |
| 1152 directMapLengths[numDirectMappedAllocations] = extent->bucket->slotSize; | 1249 directMapLengths[numDirectMappedAllocations] = extent->bucket->slotSize; |
| 1153 ++numDirectMappedAllocations; | 1250 ++numDirectMappedAllocations; |
| 1154 if (numDirectMappedAllocations == kMaxReportableDirectMaps) | 1251 if (numDirectMappedAllocations == kMaxReportableDirectMaps) |
| 1155 break; | 1252 break; |
| 1156 } | 1253 } |
| 1157 | 1254 |
| 1158 spinLockUnlock(&partition->lock); | 1255 spinLockUnlock(&partition->lock); |
| 1159 | 1256 |
| (...skipping 32 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 1192 // partitionsDumpBucketStats is called after collecting stats because it | 1289 // partitionsDumpBucketStats is called after collecting stats because it |
| 1193 // can use PartitionAlloc to allocate and this can affect the statistics. | 1290 // can use PartitionAlloc to allocate and this can affect the statistics. |
| 1194 for (size_t i = 0; i < partitionNumBuckets; ++i) { | 1291 for (size_t i = 0; i < partitionNumBuckets; ++i) { |
| 1195 if (memoryStats[i].isValid) | 1292 if (memoryStats[i].isValid) |
| 1196 partitionStatsDumper->partitionsDumpBucketStats(partitionName, &memo ryStats[i]); | 1293 partitionStatsDumper->partitionsDumpBucketStats(partitionName, &memo ryStats[i]); |
| 1197 } | 1294 } |
| 1198 } | 1295 } |
| 1199 | 1296 |
| 1200 } // namespace WTF | 1297 } // namespace WTF |
| 1201 | 1298 |
| OLD | NEW |