Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(791)

Side by Side Diff: third_party/WebKit/Source/wtf/PartitionAlloc.cpp

Issue 1502023002: Revert of Switch wtf/SpinLock to std::atomic (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 5 years ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 /* 1 /*
2 * Copyright (C) 2013 Google Inc. All rights reserved. 2 * Copyright (C) 2013 Google Inc. All rights reserved.
3 * 3 *
4 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are 5 * modification, are permitted provided that the following conditions are
6 * met: 6 * met:
7 * 7 *
8 * * Redistributions of source code must retain the above copyright 8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer. 9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above 10 * * Redistributions in binary form must reproduce the above
(...skipping 37 matching lines...) Expand 10 before | Expand all | Expand 10 after
48 static_assert(sizeof(WTF::PartitionPage) <= WTF::kPageMetadataSize, "PartitionPage should not be too big"); 48 static_assert(sizeof(WTF::PartitionPage) <= WTF::kPageMetadataSize, "PartitionPage should not be too big");
49 static_assert(sizeof(WTF::PartitionBucket) <= WTF::kPageMetadataSize, "PartitionBucket should not be too big"); 49 static_assert(sizeof(WTF::PartitionBucket) <= WTF::kPageMetadataSize, "PartitionBucket should not be too big");
50 static_assert(sizeof(WTF::PartitionSuperPageExtentEntry) <= WTF::kPageMetadataSize, "PartitionSuperPageExtentEntry should not be too big"); 50 static_assert(sizeof(WTF::PartitionSuperPageExtentEntry) <= WTF::kPageMetadataSize, "PartitionSuperPageExtentEntry should not be too big");
51 static_assert(WTF::kPageMetadataSize * WTF::kNumPartitionPagesPerSuperPage <= WTF::kSystemPageSize, "page metadata fits in hole"); 51 static_assert(WTF::kPageMetadataSize * WTF::kNumPartitionPagesPerSuperPage <= WTF::kSystemPageSize, "page metadata fits in hole");
52 // Check that some of our zanier calculations worked out as expected. 52 // Check that some of our zanier calculations worked out as expected.
53 static_assert(WTF::kGenericSmallestBucket == 8, "generic smallest bucket"); 53 static_assert(WTF::kGenericSmallestBucket == 8, "generic smallest bucket");
54 static_assert(WTF::kGenericMaxBucketed == 983040, "generic max bucketed"); 54 static_assert(WTF::kGenericMaxBucketed == 983040, "generic max bucketed");
55 55
56 namespace WTF { 56 namespace WTF {
57 57
58 SpinLock PartitionRootBase::gInitializedLock; 58 int PartitionRootBase::gInitializedLock = 0;
59 bool PartitionRootBase::gInitialized = false; 59 bool PartitionRootBase::gInitialized = false;
60 PartitionPage PartitionRootBase::gSeedPage; 60 PartitionPage PartitionRootBase::gSeedPage;
61 PartitionBucket PartitionRootBase::gPagedBucket; 61 PartitionBucket PartitionRootBase::gPagedBucket;
62 void (*PartitionRootBase::gOomHandlingFunction)() = nullptr; 62 void (*PartitionRootBase::gOomHandlingFunction)() = nullptr;
63 PartitionAllocHooks::AllocationHook* PartitionAllocHooks::m_allocationHook = nullptr; 63 PartitionAllocHooks::AllocationHook* PartitionAllocHooks::m_allocationHook = nullptr;
64 PartitionAllocHooks::FreeHook* PartitionAllocHooks::m_freeHook = nullptr; 64 PartitionAllocHooks::FreeHook* PartitionAllocHooks::m_freeHook = nullptr;
65 65
66 static uint16_t partitionBucketNumSystemPages(size_t size) 66 static uint16_t partitionBucketNumSystemPages(size_t size)
67 { 67 {
68 // This works out reasonably for the current bucket sizes of the generic 68 // This works out reasonably for the current bucket sizes of the generic
(...skipping 28 matching lines...) Expand all
97 bestPages = i; 97 bestPages = i;
98 } 98 }
99 } 99 }
100 ASSERT(bestPages > 0); 100 ASSERT(bestPages > 0);
101 return bestPages; 101 return bestPages;
102 } 102 }
103 103
104 static void partitionAllocBaseInit(PartitionRootBase* root) 104 static void partitionAllocBaseInit(PartitionRootBase* root)
105 { 105 {
106 ASSERT(!root->initialized); 106 ASSERT(!root->initialized);
107 { 107
108 SpinLock::Guard guard(PartitionRootBase::gInitializedLock); 108 spinLockLock(&PartitionRootBase::gInitializedLock);
109 if (!PartitionRootBase::gInitialized) { 109 if (!PartitionRootBase::gInitialized) {
110 PartitionRootBase::gInitialized = true; 110 PartitionRootBase::gInitialized = true;
111 // We mark the seed page as free to make sure it is skipped by our 111 // We mark the seed page as free to make sure it is skipped by our
112 // logic to find a new active page. 112 // logic to find a new active page.
113 PartitionRootBase::gPagedBucket.activePagesHead = &PartitionRootGeneric::gSeedPage; 113 PartitionRootBase::gPagedBucket.activePagesHead = &PartitionRootGeneric::gSeedPage;
114 }
115 } 114 }
115 spinLockUnlock(&PartitionRootBase::gInitializedLock);
116 116
117 root->initialized = true; 117 root->initialized = true;
118 root->totalSizeOfCommittedPages = 0; 118 root->totalSizeOfCommittedPages = 0;
119 root->totalSizeOfSuperPages = 0; 119 root->totalSizeOfSuperPages = 0;
120 root->totalSizeOfDirectMappedPages = 0; 120 root->totalSizeOfDirectMappedPages = 0;
121 root->nextSuperPage = 0; 121 root->nextSuperPage = 0;
122 root->nextPartitionPage = 0; 122 root->nextPartitionPage = 0;
123 root->nextPartitionPageEnd = 0; 123 root->nextPartitionPageEnd = 0;
124 root->firstExtent = 0; 124 root->firstExtent = 0;
125 root->currentExtent = 0; 125 root->currentExtent = 0;
(...skipping 33 matching lines...) Expand 10 before | Expand all | Expand 10 after
159 if (!i) 159 if (!i)
160 bucket->slotSize = kAllocationGranularity; 160 bucket->slotSize = kAllocationGranularity;
161 else 161 else
162 bucket->slotSize = i << kBucketShift; 162 bucket->slotSize = i << kBucketShift;
163 partitionBucketInitBase(bucket, root); 163 partitionBucketInitBase(bucket, root);
164 } 164 }
165 } 165 }
166 166
167 void partitionAllocGenericInit(PartitionRootGeneric* root) 167 void partitionAllocGenericInit(PartitionRootGeneric* root)
168 { 168 {
169 SpinLock::Guard guard(root->lock); 169 spinLockLock(&root->lock);
170 170
171 partitionAllocBaseInit(root); 171 partitionAllocBaseInit(root);
172 172
173 // Precalculate some shift and mask constants used in the hot path. 173 // Precalculate some shift and mask constants used in the hot path.
174 // Example: malloc(41) == 101001 binary. 174 // Example: malloc(41) == 101001 binary.
175 // Order is 6 (1 << 6-1)==32 is highest bit set. 175 // Order is 6 (1 << 6-1)==32 is highest bit set.
176 // orderIndex is the next three MSB == 010 == 2. 176 // orderIndex is the next three MSB == 010 == 2.
177 // subOrderIndexMask is a mask for the remaining bits == 11 (masking to 01 for the subOrderIndex). 177 // subOrderIndexMask is a mask for the remaining bits == 11 (masking to 01 for the subOrderIndex).
178 size_t order; 178 size_t order;
179 for (order = 0; order <= kBitsPerSizet; ++order) { 179 for (order = 0; order <= kBitsPerSizet; ++order) {
(...skipping 56 matching lines...) Expand 10 before | Expand all | Expand 10 after
236 *bucketPtr++ = validBucket; 236 *bucketPtr++ = validBucket;
237 bucket++; 237 bucket++;
238 } 238 }
239 } 239 }
240 } 240 }
241 ASSERT(bucket == &root->buckets[0] + kGenericNumBuckets); 241 ASSERT(bucket == &root->buckets[0] + kGenericNumBuckets);
242 ASSERT(bucketPtr == &root->bucketLookups[0] + ((kBitsPerSizet + 1) * kGenericNumBucketsPerOrder)); 242 ASSERT(bucketPtr == &root->bucketLookups[0] + ((kBitsPerSizet + 1) * kGenericNumBucketsPerOrder));
243 // And there's one last bucket lookup that will be hit for e.g. malloc(-1), 243 // And there's one last bucket lookup that will be hit for e.g. malloc(-1),
244 // which tries to overflow to a non-existant order. 244 // which tries to overflow to a non-existant order.
245 *bucketPtr = &PartitionRootGeneric::gPagedBucket; 245 *bucketPtr = &PartitionRootGeneric::gPagedBucket;
246
247 spinLockUnlock(&root->lock);
246 } 248 }
247 249
248 static bool partitionAllocShutdownBucket(PartitionBucket* bucket) 250 static bool partitionAllocShutdownBucket(PartitionBucket* bucket)
249 { 251 {
250 // Failure here indicates a memory leak. 252 // Failure here indicates a memory leak.
251 bool foundLeak = bucket->numFullPages; 253 bool foundLeak = bucket->numFullPages;
252 for (PartitionPage* page = bucket->activePagesHead; page; page = page->nextPage) 254 for (PartitionPage* page = bucket->activePagesHead; page; page = page->nextPage)
253 foundLeak |= (page->numAllocatedSlots > 0); 255 foundLeak |= (page->numAllocatedSlots > 0);
254 return foundLeak; 256 return foundLeak;
255 } 257 }
(...skipping 28 matching lines...) Expand all
284 for (i = 0; i < root->numBuckets; ++i) { 286 for (i = 0; i < root->numBuckets; ++i) {
285 PartitionBucket* bucket = &root->buckets()[i]; 287 PartitionBucket* bucket = &root->buckets()[i];
286 foundLeak |= partitionAllocShutdownBucket(bucket); 288 foundLeak |= partitionAllocShutdownBucket(bucket);
287 } 289 }
288 foundLeak |= partitionAllocBaseShutdown(root); 290 foundLeak |= partitionAllocBaseShutdown(root);
289 return !foundLeak; 291 return !foundLeak;
290 } 292 }
291 293
292 bool partitionAllocGenericShutdown(PartitionRootGeneric* root) 294 bool partitionAllocGenericShutdown(PartitionRootGeneric* root)
293 { 295 {
294 SpinLock::Guard guard(root->lock); 296 spinLockLock(&root->lock);
295 bool foundLeak = false; 297 bool foundLeak = false;
296 size_t i; 298 size_t i;
297 for (i = 0; i < kGenericNumBuckets; ++i) { 299 for (i = 0; i < kGenericNumBuckets; ++i) {
298 PartitionBucket* bucket = &root->buckets[i]; 300 PartitionBucket* bucket = &root->buckets[i];
299 foundLeak |= partitionAllocShutdownBucket(bucket); 301 foundLeak |= partitionAllocShutdownBucket(bucket);
300 } 302 }
301 foundLeak |= partitionAllocBaseShutdown(root); 303 foundLeak |= partitionAllocBaseShutdown(root);
304 spinLockUnlock(&root->lock);
302 return !foundLeak; 305 return !foundLeak;
303 } 306 }
304 307
305 #if !CPU(64BIT) 308 #if !CPU(64BIT)
306 static NEVER_INLINE void partitionOutOfMemoryWithLotsOfUncommitedPages() 309 static NEVER_INLINE void partitionOutOfMemoryWithLotsOfUncommitedPages()
307 { 310 {
308 IMMEDIATE_CRASH(); 311 IMMEDIATE_CRASH();
309 } 312 }
310 #endif 313 #endif
311 314
(...skipping 919 matching lines...) Expand 10 before | Expand all | Expand 10 after
1231 if (flags & PartitionPurgeDecommitEmptyPages) 1234 if (flags & PartitionPurgeDecommitEmptyPages)
1232 partitionDecommitEmptyPages(root); 1235 partitionDecommitEmptyPages(root);
1233 // We don't currently do anything for PartitionPurgeDiscardUnusedSystemPages 1236 // We don't currently do anything for PartitionPurgeDiscardUnusedSystemPages
1234 // here because that flag is only useful for allocations >= system page 1237 // here because that flag is only useful for allocations >= system page
1235 // size. We only have allocations that large inside generic partitions 1238 // size. We only have allocations that large inside generic partitions
1236 // at the moment. 1239 // at the moment.
1237 } 1240 }
1238 1241
1239 void partitionPurgeMemoryGeneric(PartitionRootGeneric* root, int flags) 1242 void partitionPurgeMemoryGeneric(PartitionRootGeneric* root, int flags)
1240 { 1243 {
1241 SpinLock::Guard guard(root->lock); 1244 spinLockLock(&root->lock);
1242 if (flags & PartitionPurgeDecommitEmptyPages) 1245 if (flags & PartitionPurgeDecommitEmptyPages)
1243 partitionDecommitEmptyPages(root); 1246 partitionDecommitEmptyPages(root);
1244 if (flags & PartitionPurgeDiscardUnusedSystemPages) { 1247 if (flags & PartitionPurgeDiscardUnusedSystemPages) {
1245 for (size_t i = 0; i < kGenericNumBuckets; ++i) { 1248 for (size_t i = 0; i < kGenericNumBuckets; ++i) {
1246 PartitionBucket* bucket = &root->buckets[i]; 1249 PartitionBucket* bucket = &root->buckets[i];
1247 if (bucket->slotSize >= kSystemPageSize) 1250 if (bucket->slotSize >= kSystemPageSize)
1248 partitionPurgeBucket(bucket); 1251 partitionPurgeBucket(bucket);
1249 } 1252 }
1250 } 1253 }
1254 spinLockUnlock(&root->lock);
1251 } 1255 }
1252 1256
1253 static void partitionDumpPageStats(PartitionBucketMemoryStats* statsOut, const PartitionPage* page) 1257 static void partitionDumpPageStats(PartitionBucketMemoryStats* statsOut, const PartitionPage* page)
1254 { 1258 {
1255 uint16_t bucketNumSlots = partitionBucketSlots(page->bucket); 1259 uint16_t bucketNumSlots = partitionBucketSlots(page->bucket);
1256 1260
1257 if (partitionPageStateIsDecommitted(page)) { 1261 if (partitionPageStateIsDecommitted(page)) {
1258 ++statsOut->numDecommittedPages; 1262 ++statsOut->numDecommittedPages;
1259 return; 1263 return;
1260 } 1264 }
(...skipping 57 matching lines...) Expand 10 before | Expand all | Expand 10 after
1318 } 1322 }
1319 } 1323 }
1320 1324
1321 void partitionDumpStatsGeneric(PartitionRootGeneric* partition, const char* partitionName, bool isLightDump, PartitionStatsDumper* partitionStatsDumper) 1325 void partitionDumpStatsGeneric(PartitionRootGeneric* partition, const char* partitionName, bool isLightDump, PartitionStatsDumper* partitionStatsDumper)
1322 { 1326 {
1323 PartitionBucketMemoryStats bucketStats[kGenericNumBuckets]; 1327 PartitionBucketMemoryStats bucketStats[kGenericNumBuckets];
1324 static const size_t kMaxReportableDirectMaps = 4096; 1328 static const size_t kMaxReportableDirectMaps = 4096;
1325 uint32_t directMapLengths[kMaxReportableDirectMaps]; 1329 uint32_t directMapLengths[kMaxReportableDirectMaps];
1326 size_t numDirectMappedAllocations = 0; 1330 size_t numDirectMappedAllocations = 0;
1327 1331
1328 { 1332 spinLockLock(&partition->lock);
1329 SpinLock::Guard guard(partition->lock);
1330 1333
1331 for (size_t i = 0; i < kGenericNumBuckets; ++i) { 1334 for (size_t i = 0; i < kGenericNumBuckets; ++i) {
1332 const PartitionBucket* bucket = &partition->buckets[i]; 1335 const PartitionBucket* bucket = &partition->buckets[i];
1333 // Don't report the pseudo buckets that the generic allocator sets up in 1336 // Don't report the pseudo buckets that the generic allocator sets up in
1334 // order to preserve a fast size->bucket map (see 1337 // order to preserve a fast size->bucket map (see
1335 // partitionAllocGenericInit for details). 1338 // partitionAllocGenericInit for details).
1336 if (!bucket->activePagesHead) 1339 if (!bucket->activePagesHead)
1337 bucketStats[i].isValid = false; 1340 bucketStats[i].isValid = false;
1338 else 1341 else
1339 partitionDumpBucketStats(&bucketStats[i], bucket); 1342 partitionDumpBucketStats(&bucketStats[i], bucket);
1340 } 1343 }
1341 1344
1342 for (PartitionDirectMapExtent* extent = partition->directMapList; extent; extent = extent->nextExtent) { 1345 for (PartitionDirectMapExtent* extent = partition->directMapList; extent; extent = extent->nextExtent) {
1343 ASSERT(!extent->nextExtent || extent->nextExtent->prevExtent == extent); 1346 ASSERT(!extent->nextExtent || extent->nextExtent->prevExtent == extent);
1344 directMapLengths[numDirectMappedAllocations] = extent->bucket->slotSize; 1347 directMapLengths[numDirectMappedAllocations] = extent->bucket->slotSize;
1345 ++numDirectMappedAllocations; 1348 ++numDirectMappedAllocations;
1346 if (numDirectMappedAllocations == kMaxReportableDirectMaps) 1349 if (numDirectMappedAllocations == kMaxReportableDirectMaps)
1347 break; 1350 break;
1348 }
1349 } 1351 }
1350 1352
1353 spinLockUnlock(&partition->lock);
1354
1351 // partitionsDumpBucketStats is called after collecting stats because it 1355 // partitionsDumpBucketStats is called after collecting stats because it
1352 // can try to allocate using PartitionAllocGeneric and it can't obtain the 1356 // can try to allocate using PartitionAllocGeneric and it can't obtain the
1353 // lock. 1357 // lock.
1354 PartitionMemoryStats partitionStats = { 0 }; 1358 PartitionMemoryStats partitionStats = { 0 };
1355 partitionStats.totalMmappedBytes = partition->totalSizeOfSuperPages + partition->totalSizeOfDirectMappedPages; 1359 partitionStats.totalMmappedBytes = partition->totalSizeOfSuperPages + partition->totalSizeOfDirectMappedPages;
1356 partitionStats.totalCommittedBytes = partition->totalSizeOfCommittedPages; 1360 partitionStats.totalCommittedBytes = partition->totalSizeOfCommittedPages;
1357 for (size_t i = 0; i < kGenericNumBuckets; ++i) { 1361 for (size_t i = 0; i < kGenericNumBuckets; ++i) {
1358 if (bucketStats[i].isValid) { 1362 if (bucketStats[i].isValid) {
1359 partitionStats.totalResidentBytes += bucketStats[i].residentBytes; 1363 partitionStats.totalResidentBytes += bucketStats[i].residentBytes;
1360 partitionStats.totalActiveBytes += bucketStats[i].activeBytes; 1364 partitionStats.totalActiveBytes += bucketStats[i].activeBytes;
(...skipping 48 matching lines...) Expand 10 before | Expand all | Expand 10 after
1409 partitionStats.totalDiscardableBytes += memoryStats[i].discardableBytes; 1413 partitionStats.totalDiscardableBytes += memoryStats[i].discardableBytes;
1410 if (!isLightDump) 1414 if (!isLightDump)
1411 partitionStatsDumper->partitionsDumpBucketStats(partitionName, &memoryStats[i]); 1415 partitionStatsDumper->partitionsDumpBucketStats(partitionName, &memoryStats[i]);
1412 } 1416 }
1413 } 1417 }
1414 partitionStatsDumper->partitionDumpTotals(partitionName, &partitionStats); 1418 partitionStatsDumper->partitionDumpTotals(partitionName, &partitionStats);
1415 } 1419 }
1416 1420
1417 } // namespace WTF 1421 } // namespace WTF
1418 1422
OLDNEW
« no previous file with comments | « third_party/WebKit/Source/wtf/PartitionAlloc.h ('k') | third_party/WebKit/Source/wtf/Partitions.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698