Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(251)

Side by Side Diff: Source/wtf/PartitionAlloc.cpp

Issue 1181783005: PartitionAlloc: minor cleanups. (Closed) Base URL: https://chromium.googlesource.com/chromium/blink.git@master
Patch Set: Created 5 years, 6 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « Source/wtf/PartitionAlloc.h ('k') | no next file » | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 /* 1 /*
2 * Copyright (C) 2013 Google Inc. All rights reserved. 2 * Copyright (C) 2013 Google Inc. All rights reserved.
3 * 3 *
4 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are 5 * modification, are permitted provided that the following conditions are
6 * met: 6 * met:
7 * 7 *
8 * * Redistributions of source code must retain the above copyright 8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer. 9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above 10 * * Redistributions in binary form must reproduce the above
(...skipping 188 matching lines...) Expand 10 before | Expand all | Expand 10 after
199 partitionBucketInitBase(bucket, root); 199 partitionBucketInitBase(bucket, root);
 200 // Disable pseudo buckets so that touching them faults. 200 // Disable pseudo buckets so that touching them faults.
201 if (currentSize % kGenericSmallestBucket) 201 if (currentSize % kGenericSmallestBucket)
202 bucket->activePagesHead = 0; 202 bucket->activePagesHead = 0;
203 currentSize += currentIncrement; 203 currentSize += currentIncrement;
204 ++bucket; 204 ++bucket;
205 } 205 }
206 currentIncrement <<= 1; 206 currentIncrement <<= 1;
207 } 207 }
208 ASSERT(currentSize == 1 << kGenericMaxBucketedOrder); 208 ASSERT(currentSize == 1 << kGenericMaxBucketedOrder);
 209 ASSERT(bucket == &root->buckets[0] + (kGenericNumBucketedOrders * kGenericNumBucketsPerOrder)); 209 ASSERT(bucket == &root->buckets[0] + kGenericNumBuckets);
210 210
211 // Then set up the fast size -> bucket lookup table. 211 // Then set up the fast size -> bucket lookup table.
212 bucket = &root->buckets[0]; 212 bucket = &root->buckets[0];
213 PartitionBucket** bucketPtr = &root->bucketLookups[0]; 213 PartitionBucket** bucketPtr = &root->bucketLookups[0];
214 for (order = 0; order <= kBitsPerSizet; ++order) { 214 for (order = 0; order <= kBitsPerSizet; ++order) {
215 for (j = 0; j < kGenericNumBucketsPerOrder; ++j) { 215 for (j = 0; j < kGenericNumBucketsPerOrder; ++j) {
216 if (order < kGenericMinBucketedOrder) { 216 if (order < kGenericMinBucketedOrder) {
217 // Use the bucket of the finest granularity for malloc(0) etc. 217 // Use the bucket of the finest granularity for malloc(0) etc.
218 *bucketPtr++ = &root->buckets[0]; 218 *bucketPtr++ = &root->buckets[0];
219 } else if (order > kGenericMaxBucketedOrder) { 219 } else if (order > kGenericMaxBucketedOrder) {
220 *bucketPtr++ = &PartitionRootGeneric::gPagedBucket; 220 *bucketPtr++ = &PartitionRootGeneric::gPagedBucket;
221 } else { 221 } else {
222 PartitionBucket* validBucket = bucket; 222 PartitionBucket* validBucket = bucket;
223 // Skip over invalid buckets. 223 // Skip over invalid buckets.
224 while (validBucket->slotSize % kGenericSmallestBucket) 224 while (validBucket->slotSize % kGenericSmallestBucket)
225 validBucket++; 225 validBucket++;
226 *bucketPtr++ = validBucket; 226 *bucketPtr++ = validBucket;
227 bucket++; 227 bucket++;
228 } 228 }
229 } 229 }
230 } 230 }
 231 ASSERT(bucket == &root->buckets[0] + (kGenericNumBucketedOrders * kGenericNumBucketsPerOrder)); 231 ASSERT(bucket == &root->buckets[0] + kGenericNumBuckets);
 232 ASSERT(bucketPtr == &root->bucketLookups[0] + ((kBitsPerSizet + 1) * kGenericNumBucketsPerOrder)); 232 ASSERT(bucketPtr == &root->bucketLookups[0] + ((kBitsPerSizet + 1) * kGenericNumBucketsPerOrder));
233 // And there's one last bucket lookup that will be hit for e.g. malloc(-1), 233 // And there's one last bucket lookup that will be hit for e.g. malloc(-1),
 234 // which tries to overflow to a non-existent order. 234 // which tries to overflow to a non-existent order.
235 *bucketPtr = &PartitionRootGeneric::gPagedBucket; 235 *bucketPtr = &PartitionRootGeneric::gPagedBucket;
236 } 236 }
237 237
238 static bool partitionAllocShutdownBucket(PartitionBucket* bucket) 238 static bool partitionAllocShutdownBucket(PartitionBucket* bucket)
239 { 239 {
240 // Failure here indicates a memory leak. 240 // Failure here indicates a memory leak.
241 bool foundLeak = bucket->numFullPages; 241 bool foundLeak = bucket->numFullPages;
(...skipping 34 matching lines...) Expand 10 before | Expand all | Expand 10 after
276 foundLeak |= partitionAllocShutdownBucket(bucket); 276 foundLeak |= partitionAllocShutdownBucket(bucket);
277 } 277 }
278 foundLeak |= partitionAllocBaseShutdown(root); 278 foundLeak |= partitionAllocBaseShutdown(root);
279 return !foundLeak; 279 return !foundLeak;
280 } 280 }
281 281
282 bool partitionAllocGenericShutdown(PartitionRootGeneric* root) 282 bool partitionAllocGenericShutdown(PartitionRootGeneric* root)
283 { 283 {
284 bool foundLeak = false; 284 bool foundLeak = false;
285 size_t i; 285 size_t i;
286 for (i = 0; i < kGenericNumBucketedOrders * kGenericNumBucketsPerOrder; ++i) { 286 for (i = 0; i < kGenericNumBuckets; ++i) {
287 PartitionBucket* bucket = &root->buckets[i]; 287 PartitionBucket* bucket = &root->buckets[i];
288 foundLeak |= partitionAllocShutdownBucket(bucket); 288 foundLeak |= partitionAllocShutdownBucket(bucket);
289 } 289 }
290 foundLeak |= partitionAllocBaseShutdown(root); 290 foundLeak |= partitionAllocBaseShutdown(root);
291 return !foundLeak; 291 return !foundLeak;
292 } 292 }
293 293
294 #if !CPU(64BIT) 294 #if !CPU(64BIT)
295 static NEVER_INLINE void partitionOutOfMemoryWithLotsOfUncommitedPages() 295 static NEVER_INLINE void partitionOutOfMemoryWithLotsOfUncommitedPages()
296 { 296 {
(...skipping 847 matching lines...) Expand 10 before | Expand all | Expand 10 after
1144 if (bucket->activePagesHead != &PartitionRootGeneric::gSeedPage) { 1144 if (bucket->activePagesHead != &PartitionRootGeneric::gSeedPage) {
 1145 for (const PartitionPage* page = bucket->activePagesHead; page; page = page->nextPage) { 1145 for (const PartitionPage* page = bucket->activePagesHead; page; page = page->nextPage) {
1146 ASSERT(page != &PartitionRootGeneric::gSeedPage); 1146 ASSERT(page != &PartitionRootGeneric::gSeedPage);
1147 partitionDumpPageStats(statsOut, page); 1147 partitionDumpPageStats(statsOut, page);
1148 } 1148 }
1149 } 1149 }
1150 } 1150 }
1151 1151
 1152 void partitionDumpStatsGeneric(PartitionRootGeneric* partition, const char* partitionName, PartitionStatsDumper* partitionStatsDumper) 1152 void partitionDumpStatsGeneric(PartitionRootGeneric* partition, const char* partitionName, PartitionStatsDumper* partitionStatsDumper)
1153 { 1153 {
 1154 const size_t partitionNumBuckets = kGenericNumBucketedOrders * kGenericNumBucketsPerOrder; 1154 PartitionBucketMemoryStats bucketStats[kGenericNumBuckets];
1155 spinLockLock(&partition->lock); 1155 spinLockLock(&partition->lock);
1156 PartitionBucketMemoryStats bucketStats[partitionNumBuckets]; 1156 for (size_t i = 0; i < kGenericNumBuckets; ++i) {
1157 for (size_t i = 0; i < partitionNumBuckets; ++i) {
1158 const PartitionBucket* bucket = &partition->buckets[i]; 1157 const PartitionBucket* bucket = &partition->buckets[i];
1159 // Don't report the pseudo buckets that the generic allocator sets up in 1158 // Don't report the pseudo buckets that the generic allocator sets up in
1160 // order to preserve a fast size->bucket map (see 1159 // order to preserve a fast size->bucket map (see
1161 // partitionAllocGenericInit for details). 1160 // partitionAllocGenericInit for details).
1162 if (!bucket->activePagesHead) 1161 if (!bucket->activePagesHead)
1163 bucketStats[i].isValid = false; 1162 bucketStats[i].isValid = false;
1164 else 1163 else
1165 partitionDumpBucketStats(&bucketStats[i], bucket); 1164 partitionDumpBucketStats(&bucketStats[i], bucket);
1166 } 1165 }
1167 1166
1168 static const size_t kMaxReportableDirectMaps = 4096; 1167 static const size_t kMaxReportableDirectMaps = 4096;
1169 uint32_t directMapLengths[kMaxReportableDirectMaps]; 1168 uint32_t directMapLengths[kMaxReportableDirectMaps];
1170 size_t numDirectMappedAllocations = 0; 1169 size_t numDirectMappedAllocations = 0;
 1171 for (PartitionDirectMapExtent* extent = partition->directMapList; extent; extent = extent->nextExtent) { 1170 for (PartitionDirectMapExtent* extent = partition->directMapList; extent; extent = extent->nextExtent) {
1172 ASSERT(!extent->nextExtent || extent->nextExtent->prevExtent == extent); 1171 ASSERT(!extent->nextExtent || extent->nextExtent->prevExtent == extent);
1173 directMapLengths[numDirectMappedAllocations] = extent->bucket->slotSize; 1172 directMapLengths[numDirectMappedAllocations] = extent->bucket->slotSize;
1174 ++numDirectMappedAllocations; 1173 ++numDirectMappedAllocations;
1175 if (numDirectMappedAllocations == kMaxReportableDirectMaps) 1174 if (numDirectMappedAllocations == kMaxReportableDirectMaps)
1176 break; 1175 break;
1177 } 1176 }
1178 1177
1179 spinLockUnlock(&partition->lock); 1178 spinLockUnlock(&partition->lock);
1180 1179
1181 // partitionsDumpBucketStats is called after collecting stats because it 1180 // partitionsDumpBucketStats is called after collecting stats because it
1182 // can try to allocate using PartitionAllocGeneric and it can't obtain the 1181 // can try to allocate using PartitionAllocGeneric and it can't obtain the
1183 // lock. 1182 // lock.
1184 for (size_t i = 0; i < partitionNumBuckets; ++i) { 1183 for (size_t i = 0; i < kGenericNumBuckets; ++i) {
1185 if (bucketStats[i].isValid) 1184 if (bucketStats[i].isValid)
 1186 partitionStatsDumper->partitionsDumpBucketStats(partitionName, &bucketStats[i]); 1185 partitionStatsDumper->partitionsDumpBucketStats(partitionName, &bucketStats[i]);
1187 } 1186 }
1188 for (size_t i = 0; i < numDirectMappedAllocations; ++i) { 1187 for (size_t i = 0; i < numDirectMappedAllocations; ++i) {
1189 PartitionBucketMemoryStats stats; 1188 PartitionBucketMemoryStats stats;
1190 memset(&stats, '\0', sizeof(stats)); 1189 memset(&stats, '\0', sizeof(stats));
1191 stats.isValid = true; 1190 stats.isValid = true;
1192 stats.isDirectMap = true; 1191 stats.isDirectMap = true;
1193 stats.numFullPages = 1; 1192 stats.numFullPages = 1;
1194 uint32_t size = directMapLengths[i]; 1193 uint32_t size = directMapLengths[i];
(...skipping 18 matching lines...) Expand all
1213 // partitionsDumpBucketStats is called after collecting stats because it 1212 // partitionsDumpBucketStats is called after collecting stats because it
1214 // can use PartitionAlloc to allocate and this can affect the statistics. 1213 // can use PartitionAlloc to allocate and this can affect the statistics.
1215 for (size_t i = 0; i < partitionNumBuckets; ++i) { 1214 for (size_t i = 0; i < partitionNumBuckets; ++i) {
1216 if (memoryStats[i].isValid) 1215 if (memoryStats[i].isValid)
 1217 partitionStatsDumper->partitionsDumpBucketStats(partitionName, &memoryStats[i]); 1216 partitionStatsDumper->partitionsDumpBucketStats(partitionName, &memoryStats[i]);
1218 } 1217 }
1219 } 1218 }
1220 1219
1221 } // namespace WTF 1220 } // namespace WTF
1222 1221
OLDNEW
« no previous file with comments | « Source/wtf/PartitionAlloc.h ('k') | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698