| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 37 matching lines...) |
| 48 static_assert(sizeof(WTF::PartitionPage) <= WTF::kPageMetadataSize, "PartitionPage should not be too big"); | 48 static_assert(sizeof(WTF::PartitionPage) <= WTF::kPageMetadataSize, "PartitionPage should not be too big"); |
| 49 static_assert(sizeof(WTF::PartitionBucket) <= WTF::kPageMetadataSize, "PartitionBucket should not be too big"); | 49 static_assert(sizeof(WTF::PartitionBucket) <= WTF::kPageMetadataSize, "PartitionBucket should not be too big"); |
| 50 static_assert(sizeof(WTF::PartitionSuperPageExtentEntry) <= WTF::kPageMetadataSize, "PartitionSuperPageExtentEntry should not be too big"); | 50 static_assert(sizeof(WTF::PartitionSuperPageExtentEntry) <= WTF::kPageMetadataSize, "PartitionSuperPageExtentEntry should not be too big"); |
| 51 static_assert(WTF::kPageMetadataSize * WTF::kNumPartitionPagesPerSuperPage <= WTF::kSystemPageSize, "page metadata fits in hole"); | 51 static_assert(WTF::kPageMetadataSize * WTF::kNumPartitionPagesPerSuperPage <= WTF::kSystemPageSize, "page metadata fits in hole"); |
| 52 // Check that some of our zanier calculations worked out as expected. | 52 // Check that some of our zanier calculations worked out as expected. |
| 53 static_assert(WTF::kGenericSmallestBucket == 8, "generic smallest bucket"); | 53 static_assert(WTF::kGenericSmallestBucket == 8, "generic smallest bucket"); |
| 54 static_assert(WTF::kGenericMaxBucketed == 983040, "generic max bucketed"); | 54 static_assert(WTF::kGenericMaxBucketed == 983040, "generic max bucketed"); |
| 55 | 55 |
| 56 namespace WTF { | 56 namespace WTF { |
| 57 | 57 |
| 58 int PartitionRootBase::gInitializedLock = 0; | 58 SpinLock PartitionRootBase::gInitializedLock; |
| 59 bool PartitionRootBase::gInitialized = false; | 59 bool PartitionRootBase::gInitialized = false; |
| 60 PartitionPage PartitionRootBase::gSeedPage; | 60 PartitionPage PartitionRootBase::gSeedPage; |
| 61 PartitionBucket PartitionRootBase::gPagedBucket; | 61 PartitionBucket PartitionRootBase::gPagedBucket; |
| 62 void (*PartitionRootBase::gOomHandlingFunction)() = nullptr; | 62 void (*PartitionRootBase::gOomHandlingFunction)() = nullptr; |
| 63 PartitionAllocHooks::AllocationHook* PartitionAllocHooks::m_allocationHook = nullptr; | 63 PartitionAllocHooks::AllocationHook* PartitionAllocHooks::m_allocationHook = nullptr; |
| 64 PartitionAllocHooks::FreeHook* PartitionAllocHooks::m_freeHook = nullptr; | 64 PartitionAllocHooks::FreeHook* PartitionAllocHooks::m_freeHook = nullptr; |
| 65 | 65 |
| 66 static uint16_t partitionBucketNumSystemPages(size_t size) | 66 static uint16_t partitionBucketNumSystemPages(size_t size) |
| 67 { | 67 { |
| 68 // This works out reasonably for the current bucket sizes of the generic | 68 // This works out reasonably for the current bucket sizes of the generic |
| (...skipping 28 matching lines...) |
| 97 bestPages = i; | 97 bestPages = i; |
| 98 } | 98 } |
| 99 } | 99 } |
| 100 ASSERT(bestPages > 0); | 100 ASSERT(bestPages > 0); |
| 101 return bestPages; | 101 return bestPages; |
| 102 } | 102 } |
| 103 | 103 |
| 104 static void partitionAllocBaseInit(PartitionRootBase* root) | 104 static void partitionAllocBaseInit(PartitionRootBase* root) |
| 105 { | 105 { |
| 106 ASSERT(!root->initialized); | 106 ASSERT(!root->initialized); |
| 107 | 107 { |
| 108 spinLockLock(&PartitionRootBase::gInitializedLock); | 108 SpinLock::Guard guard(PartitionRootBase::gInitializedLock); |
| 109 if (!PartitionRootBase::gInitialized) { | 109 if (!PartitionRootBase::gInitialized) { |
| 110 PartitionRootBase::gInitialized = true; | 110 PartitionRootBase::gInitialized = true; |
| 111 // We mark the seed page as free to make sure it is skipped by our | 111 // We mark the seed page as free to make sure it is skipped by our |
| 112 // logic to find a new active page. | 112 // logic to find a new active page. |
| 113 PartitionRootBase::gPagedBucket.activePagesHead = &PartitionRootGeneric::gSeedPage; | 113 PartitionRootBase::gPagedBucket.activePagesHead = &PartitionRootGeneric::gSeedPage; |
| 114 } |
| 114 } | 115 } |
| 115 spinLockUnlock(&PartitionRootBase::gInitializedLock); | |
| 116 | 116 |
| 117 root->initialized = true; | 117 root->initialized = true; |
| 118 root->totalSizeOfCommittedPages = 0; | 118 root->totalSizeOfCommittedPages = 0; |
| 119 root->totalSizeOfSuperPages = 0; | 119 root->totalSizeOfSuperPages = 0; |
| 120 root->totalSizeOfDirectMappedPages = 0; | 120 root->totalSizeOfDirectMappedPages = 0; |
| 121 root->nextSuperPage = 0; | 121 root->nextSuperPage = 0; |
| 122 root->nextPartitionPage = 0; | 122 root->nextPartitionPage = 0; |
| 123 root->nextPartitionPageEnd = 0; | 123 root->nextPartitionPageEnd = 0; |
| 124 root->firstExtent = 0; | 124 root->firstExtent = 0; |
| 125 root->currentExtent = 0; | 125 root->currentExtent = 0; |
| (...skipping 33 matching lines...) |
| 159 if (!i) | 159 if (!i) |
| 160 bucket->slotSize = kAllocationGranularity; | 160 bucket->slotSize = kAllocationGranularity; |
| 161 else | 161 else |
| 162 bucket->slotSize = i << kBucketShift; | 162 bucket->slotSize = i << kBucketShift; |
| 163 partitionBucketInitBase(bucket, root); | 163 partitionBucketInitBase(bucket, root); |
| 164 } | 164 } |
| 165 } | 165 } |
| 166 | 166 |
| 167 void partitionAllocGenericInit(PartitionRootGeneric* root) | 167 void partitionAllocGenericInit(PartitionRootGeneric* root) |
| 168 { | 168 { |
| 169 spinLockLock(&root->lock); | 169 SpinLock::Guard guard(root->lock); |
| 170 | 170 |
| 171 partitionAllocBaseInit(root); | 171 partitionAllocBaseInit(root); |
| 172 | 172 |
| 173 // Precalculate some shift and mask constants used in the hot path. | 173 // Precalculate some shift and mask constants used in the hot path. |
| 174 // Example: malloc(41) == 101001 binary. | 174 // Example: malloc(41) == 101001 binary. |
| 175 // Order is 6 (1 << 6-1)==32 is highest bit set. | 175 // Order is 6 (1 << 6-1)==32 is highest bit set. |
| 176 // orderIndex is the next three MSB == 010 == 2. | 176 // orderIndex is the next three MSB == 010 == 2. |
| 177 // subOrderIndexMask is a mask for the remaining bits == 11 (masking to 01 for the subOrderIndex). | 177 // subOrderIndexMask is a mask for the remaining bits == 11 (masking to 01 for the subOrderIndex). |
| 178 size_t order; | 178 size_t order; |
| 179 for (order = 0; order <= kBitsPerSizet; ++order) { | 179 for (order = 0; order <= kBitsPerSizet; ++order) { |
| (...skipping 56 matching lines...) |
| 236 *bucketPtr++ = validBucket; | 236 *bucketPtr++ = validBucket; |
| 237 bucket++; | 237 bucket++; |
| 238 } | 238 } |
| 239 } | 239 } |
| 240 } | 240 } |
| 241 ASSERT(bucket == &root->buckets[0] + kGenericNumBuckets); | 241 ASSERT(bucket == &root->buckets[0] + kGenericNumBuckets); |
| 242 ASSERT(bucketPtr == &root->bucketLookups[0] + ((kBitsPerSizet + 1) * kGenericNumBucketsPerOrder)); | 242 ASSERT(bucketPtr == &root->bucketLookups[0] + ((kBitsPerSizet + 1) * kGenericNumBucketsPerOrder)); |
| 243 // And there's one last bucket lookup that will be hit for e.g. malloc(-1), | 243 // And there's one last bucket lookup that will be hit for e.g. malloc(-1), |
| 244 // which tries to overflow to a non-existent order. | 244 // which tries to overflow to a non-existent order. |
| 245 *bucketPtr = &PartitionRootGeneric::gPagedBucket; | 245 *bucketPtr = &PartitionRootGeneric::gPagedBucket; |
| 246 | |
| 247 spinLockUnlock(&root->lock); | |
| 248 } | 246 } |
| 249 | 247 |
| 250 static bool partitionAllocShutdownBucket(PartitionBucket* bucket) | 248 static bool partitionAllocShutdownBucket(PartitionBucket* bucket) |
| 251 { | 249 { |
| 252 // Failure here indicates a memory leak. | 250 // Failure here indicates a memory leak. |
| 253 bool foundLeak = bucket->numFullPages; | 251 bool foundLeak = bucket->numFullPages; |
| 254 for (PartitionPage* page = bucket->activePagesHead; page; page = page->nextPage) | 252 for (PartitionPage* page = bucket->activePagesHead; page; page = page->nextPage) |
| 255 foundLeak |= (page->numAllocatedSlots > 0); | 253 foundLeak |= (page->numAllocatedSlots > 0); |
| 256 return foundLeak; | 254 return foundLeak; |
| 257 } | 255 } |
| (...skipping 28 matching lines...) |
| 286 for (i = 0; i < root->numBuckets; ++i) { | 284 for (i = 0; i < root->numBuckets; ++i) { |
| 287 PartitionBucket* bucket = &root->buckets()[i]; | 285 PartitionBucket* bucket = &root->buckets()[i]; |
| 288 foundLeak |= partitionAllocShutdownBucket(bucket); | 286 foundLeak |= partitionAllocShutdownBucket(bucket); |
| 289 } | 287 } |
| 290 foundLeak |= partitionAllocBaseShutdown(root); | 288 foundLeak |= partitionAllocBaseShutdown(root); |
| 291 return !foundLeak; | 289 return !foundLeak; |
| 292 } | 290 } |
| 293 | 291 |
| 294 bool partitionAllocGenericShutdown(PartitionRootGeneric* root) | 292 bool partitionAllocGenericShutdown(PartitionRootGeneric* root) |
| 295 { | 293 { |
| 296 spinLockLock(&root->lock); | 294 SpinLock::Guard guard(root->lock); |
| 297 bool foundLeak = false; | 295 bool foundLeak = false; |
| 298 size_t i; | 296 size_t i; |
| 299 for (i = 0; i < kGenericNumBuckets; ++i) { | 297 for (i = 0; i < kGenericNumBuckets; ++i) { |
| 300 PartitionBucket* bucket = &root->buckets[i]; | 298 PartitionBucket* bucket = &root->buckets[i]; |
| 301 foundLeak |= partitionAllocShutdownBucket(bucket); | 299 foundLeak |= partitionAllocShutdownBucket(bucket); |
| 302 } | 300 } |
| 303 foundLeak |= partitionAllocBaseShutdown(root); | 301 foundLeak |= partitionAllocBaseShutdown(root); |
| 304 spinLockUnlock(&root->lock); | |
| 305 return !foundLeak; | 302 return !foundLeak; |
| 306 } | 303 } |
| 307 | 304 |
| 308 #if !CPU(64BIT) | 305 #if !CPU(64BIT) |
| 309 static NEVER_INLINE void partitionOutOfMemoryWithLotsOfUncommitedPages() | 306 static NEVER_INLINE void partitionOutOfMemoryWithLotsOfUncommitedPages() |
| 310 { | 307 { |
| 311 IMMEDIATE_CRASH(); | 308 IMMEDIATE_CRASH(); |
| 312 } | 309 } |
| 313 #endif | 310 #endif |
| 314 | 311 |
| (...skipping 919 matching lines...) |
| 1234 if (flags & PartitionPurgeDecommitEmptyPages) | 1231 if (flags & PartitionPurgeDecommitEmptyPages) |
| 1235 partitionDecommitEmptyPages(root); | 1232 partitionDecommitEmptyPages(root); |
| 1236 // We don't currently do anything for PartitionPurgeDiscardUnusedSystemPages | 1233 // We don't currently do anything for PartitionPurgeDiscardUnusedSystemPages |
| 1237 // here because that flag is only useful for allocations >= system page | 1234 // here because that flag is only useful for allocations >= system page |
| 1238 // size. We only have allocations that large inside generic partitions | 1235 // size. We only have allocations that large inside generic partitions |
| 1239 // at the moment. | 1236 // at the moment. |
| 1240 } | 1237 } |
| 1241 | 1238 |
| 1242 void partitionPurgeMemoryGeneric(PartitionRootGeneric* root, int flags) | 1239 void partitionPurgeMemoryGeneric(PartitionRootGeneric* root, int flags) |
| 1243 { | 1240 { |
| 1244 spinLockLock(&root->lock); | 1241 SpinLock::Guard guard(root->lock); |
| 1245 if (flags & PartitionPurgeDecommitEmptyPages) | 1242 if (flags & PartitionPurgeDecommitEmptyPages) |
| 1246 partitionDecommitEmptyPages(root); | 1243 partitionDecommitEmptyPages(root); |
| 1247 if (flags & PartitionPurgeDiscardUnusedSystemPages) { | 1244 if (flags & PartitionPurgeDiscardUnusedSystemPages) { |
| 1248 for (size_t i = 0; i < kGenericNumBuckets; ++i) { | 1245 for (size_t i = 0; i < kGenericNumBuckets; ++i) { |
| 1249 PartitionBucket* bucket = &root->buckets[i]; | 1246 PartitionBucket* bucket = &root->buckets[i]; |
| 1250 if (bucket->slotSize >= kSystemPageSize) | 1247 if (bucket->slotSize >= kSystemPageSize) |
| 1251 partitionPurgeBucket(bucket); | 1248 partitionPurgeBucket(bucket); |
| 1252 } | 1249 } |
| 1253 } | 1250 } |
| 1254 spinLockUnlock(&root->lock); | |
| 1255 } | 1251 } |
| 1256 | 1252 |
| 1257 static void partitionDumpPageStats(PartitionBucketMemoryStats* statsOut, const PartitionPage* page) | 1253 static void partitionDumpPageStats(PartitionBucketMemoryStats* statsOut, const PartitionPage* page) |
| 1258 { | 1254 { |
| 1259 uint16_t bucketNumSlots = partitionBucketSlots(page->bucket); | 1255 uint16_t bucketNumSlots = partitionBucketSlots(page->bucket); |
| 1260 | 1256 |
| 1261 if (partitionPageStateIsDecommitted(page)) { | 1257 if (partitionPageStateIsDecommitted(page)) { |
| 1262 ++statsOut->numDecommittedPages; | 1258 ++statsOut->numDecommittedPages; |
| 1263 return; | 1259 return; |
| 1264 } | 1260 } |
| (...skipping 57 matching lines...) |
| 1322 } | 1318 } |
| 1323 } | 1319 } |
| 1324 | 1320 |
| 1325 void partitionDumpStatsGeneric(PartitionRootGeneric* partition, const char* partitionName, bool isLightDump, PartitionStatsDumper* partitionStatsDumper) | 1321 void partitionDumpStatsGeneric(PartitionRootGeneric* partition, const char* partitionName, bool isLightDump, PartitionStatsDumper* partitionStatsDumper) |
| 1326 { | 1322 { |
| 1327 PartitionBucketMemoryStats bucketStats[kGenericNumBuckets]; | 1323 PartitionBucketMemoryStats bucketStats[kGenericNumBuckets]; |
| 1328 static const size_t kMaxReportableDirectMaps = 4096; | 1324 static const size_t kMaxReportableDirectMaps = 4096; |
| 1329 uint32_t directMapLengths[kMaxReportableDirectMaps]; | 1325 uint32_t directMapLengths[kMaxReportableDirectMaps]; |
| 1330 size_t numDirectMappedAllocations = 0; | 1326 size_t numDirectMappedAllocations = 0; |
| 1331 | 1327 |
| 1332 spinLockLock(&partition->lock); | 1328 { |
| 1329 SpinLock::Guard guard(partition->lock); |
| 1333 | 1330 |
| 1334 for (size_t i = 0; i < kGenericNumBuckets; ++i) { | 1331 for (size_t i = 0; i < kGenericNumBuckets; ++i) { |
| 1335 const PartitionBucket* bucket = &partition->buckets[i]; | 1332 const PartitionBucket* bucket = &partition->buckets[i]; |
| 1336 // Don't report the pseudo buckets that the generic allocator sets up in | 1333 // Don't report the pseudo buckets that the generic allocator sets up in |
| 1337 // order to preserve a fast size->bucket map (see | 1334 // order to preserve a fast size->bucket map (see |
| 1338 // partitionAllocGenericInit for details). | 1335 // partitionAllocGenericInit for details). |
| 1339 if (!bucket->activePagesHead) | 1336 if (!bucket->activePagesHead) |
| 1340 bucketStats[i].isValid = false; | 1337 bucketStats[i].isValid = false; |
| 1341 else | 1338 else |
| 1342 partitionDumpBucketStats(&bucketStats[i], bucket); | 1339 partitionDumpBucketStats(&bucketStats[i], bucket); |
| 1340 } |
| 1341 |
| 1342 for (PartitionDirectMapExtent* extent = partition->directMapList; extent; extent = extent->nextExtent) { |
| 1343 ASSERT(!extent->nextExtent || extent->nextExtent->prevExtent == extent); |
| 1344 directMapLengths[numDirectMappedAllocations] = extent->bucket->slotSize; |
| 1345 ++numDirectMappedAllocations; |
| 1346 if (numDirectMappedAllocations == kMaxReportableDirectMaps) |
| 1347 break; |
| 1348 } |
| 1343 } | 1349 } |
| 1344 | 1350 |
| 1345 for (PartitionDirectMapExtent* extent = partition->directMapList; extent; extent = extent->nextExtent) { | |
| 1346 ASSERT(!extent->nextExtent || extent->nextExtent->prevExtent == extent); | |
| 1347 directMapLengths[numDirectMappedAllocations] = extent->bucket->slotSize; | |
| 1348 ++numDirectMappedAllocations; | |
| 1349 if (numDirectMappedAllocations == kMaxReportableDirectMaps) | |
| 1350 break; | |
| 1351 } | |
| 1352 | |
| 1353 spinLockUnlock(&partition->lock); | |
| 1354 | |
| 1355 // partitionsDumpBucketStats is called after collecting stats because it | 1351 // partitionsDumpBucketStats is called after collecting stats because it |
| 1356 // can try to allocate using PartitionAllocGeneric and it can't obtain the | 1352 // can try to allocate using PartitionAllocGeneric and it can't obtain the |
| 1357 // lock. | 1353 // lock. |
| 1358 PartitionMemoryStats partitionStats = { 0 }; | 1354 PartitionMemoryStats partitionStats = { 0 }; |
| 1359 partitionStats.totalMmappedBytes = partition->totalSizeOfSuperPages + partition->totalSizeOfDirectMappedPages; | 1355 partitionStats.totalMmappedBytes = partition->totalSizeOfSuperPages + partition->totalSizeOfDirectMappedPages; |
| 1360 partitionStats.totalCommittedBytes = partition->totalSizeOfCommittedPages; | 1356 partitionStats.totalCommittedBytes = partition->totalSizeOfCommittedPages; |
| 1361 for (size_t i = 0; i < kGenericNumBuckets; ++i) { | 1357 for (size_t i = 0; i < kGenericNumBuckets; ++i) { |
| 1362 if (bucketStats[i].isValid) { | 1358 if (bucketStats[i].isValid) { |
| 1363 partitionStats.totalResidentBytes += bucketStats[i].residentBytes; | 1359 partitionStats.totalResidentBytes += bucketStats[i].residentBytes; |
| 1364 partitionStats.totalActiveBytes += bucketStats[i].activeBytes; | 1360 partitionStats.totalActiveBytes += bucketStats[i].activeBytes; |
| (...skipping 48 matching lines...) |
| 1413 partitionStats.totalDiscardableBytes += memoryStats[i].discardableBytes; | 1409 partitionStats.totalDiscardableBytes += memoryStats[i].discardableBytes; |
| 1414 if (!isLightDump) | 1410 if (!isLightDump) |
| 1415 partitionStatsDumper->partitionsDumpBucketStats(partitionName, &memoryStats[i]); | 1411 partitionStatsDumper->partitionsDumpBucketStats(partitionName, &memoryStats[i]); |
| 1416 } | 1412 } |
| 1417 } | 1413 } |
| 1418 partitionStatsDumper->partitionDumpTotals(partitionName, &partitionStats); | 1414 partitionStatsDumper->partitionDumpTotals(partitionName, &partitionStats); |
| 1419 } | 1415 } |
| 1420 | 1416 |
| 1421 } // namespace WTF | 1417 } // namespace WTF |
| 1422 | 1418 |
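
The change running through every hunk above is mechanical: each `spinLockLock()`/`spinLockUnlock()` pair in the OLD column becomes a `SpinLock` object plus a scoped `SpinLock::Guard` in the NEW column, so the lock is released on every return path and the explicit unlock calls disappear. The CL's actual `SpinLock` class is defined in a separate file of the patch; the snippet below is only a minimal sketch of the shape such a class and guard could take, assuming a `std::atomic_flag`-based lock, with a usage pattern mirroring the extra brace scope the NEW column adds in `partitionDumpStatsGeneric`.

```cpp
// Minimal sketch only: assumes a std::atomic_flag-based lock. The real
// WTF::SpinLock added by this CL lives in a separate header and may differ
// (e.g. yielding while spinning, static-initialization requirements).
#include <atomic>

class SpinLock {
public:
    // RAII guard: locks on construction, unlocks on destruction, so every
    // early return releases the lock without an explicit unlock call.
    class Guard {
    public:
        explicit Guard(SpinLock& lock) : m_lock(lock) { m_lock.lock(); }
        ~Guard() { m_lock.unlock(); }
        Guard(const Guard&) = delete;
        Guard& operator=(const Guard&) = delete;

    private:
        SpinLock& m_lock;
    };

    void lock()
    {
        // Spin until the flag was previously clear and we managed to set it.
        while (m_flag.test_and_set(std::memory_order_acquire)) { }
    }
    void unlock() { m_flag.clear(std::memory_order_release); }

private:
    std::atomic_flag m_flag = ATOMIC_FLAG_INIT;
};

// Usage mirroring the new partitionDumpStatsGeneric: the inner braces bound
// the critical section; gExampleLock and collectThenDump are hypothetical.
static SpinLock gExampleLock;

void collectThenDump()
{
    {
        SpinLock::Guard guard(gExampleLock);
        // ... collect stats while holding the lock ...
    }
    // ... report stats here, with the lock already released ...
}
```

The extra braces matter in `partitionDumpStatsGeneric`: the guard's destructor runs at the closing brace, so `partitionsDumpBucketStats` and `partitionDumpTotals` are only called after the lock is dropped, which is what the comment in the NEW column requires since the dumper may itself allocate from PartitionAllocGeneric.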
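
For the lookup-table math that `partitionAllocGenericInit` precomputes (the `malloc(41)` walkthrough in the comments around lines 173–177 above), the following standalone sketch reproduces the same split of a request size into order, orderIndex and subOrderIndex. The 3-bits-per-order constant is an assumption taken from the comment's "next three MSB"; the real constants and precomputed shift/mask tables live in the PartitionAlloc headers, so treat this as an illustration of the arithmetic rather than the allocator's actual code.

```cpp
// Illustrative only: reproduces the malloc(41) walkthrough from the comment.
// kBucketsPerOrderBits = 3 is an assumption based on "the next three MSB".
#include <cassert>
#include <cstddef>

static const size_t kBucketsPerOrderBits = 3;

// "Order" is the 1-based position of the highest set bit (6 for 41 == 0b101001).
static size_t orderOf(size_t size)
{
    size_t order = 0;
    while (size) {
        ++order;
        size >>= 1;
    }
    return order;
}

int main()
{
    const size_t size = 41;             // 0b101001
    const size_t order = orderOf(size); // 6, since 1 << (6 - 1) == 32 is the top bit

    // The next three bits below the top bit select the bucket within the order.
    const size_t shift = order - 1 - kBucketsPerOrderBits;                                    // 2
    const size_t orderIndex = (size >> shift) & ((size_t(1) << kBucketsPerOrderBits) - 1);    // 0b010 == 2

    // The remaining low bits are the sub-order index, used to round up within the bucket.
    const size_t subOrderIndexMask = (size_t(1) << shift) - 1; // 0b11
    const size_t subOrderIndex = size & subOrderIndexMask;     // 0b01 == 1

    assert(order == 6);
    assert(orderIndex == 2);
    assert(subOrderIndexMask == 3);
    assert(subOrderIndex == 1);
    return 0;
}
```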