| Index: third_party/WebKit/Source/wtf/PartitionAlloc.cpp
|
| diff --git a/third_party/WebKit/Source/wtf/PartitionAlloc.cpp b/third_party/WebKit/Source/wtf/PartitionAlloc.cpp
|
| index 8e6cdd9f4be257ac5b09fe91a7a32e5fe1482061..a71dd0053b92d34dbe75649ed3be8605548749e8 100644
|
| --- a/third_party/WebKit/Source/wtf/PartitionAlloc.cpp
|
| +++ b/third_party/WebKit/Source/wtf/PartitionAlloc.cpp
|
| @@ -55,7 +55,7 @@ static_assert(WTF::kGenericMaxBucketed == 983040, "generic max bucketed");
|
|
|
| namespace WTF {
|
|
|
| -int PartitionRootBase::gInitializedLock = 0;
|
| +SpinLock PartitionRootBase::gInitializedLock;
|
| bool PartitionRootBase::gInitialized = false;
|
| PartitionPage PartitionRootBase::gSeedPage;
|
| PartitionBucket PartitionRootBase::gPagedBucket;
|
| @@ -104,15 +104,15 @@ static uint16_t partitionBucketNumSystemPages(size_t size)
|
| static void partitionAllocBaseInit(PartitionRootBase* root)
|
| {
|
| ASSERT(!root->initialized);
|
| -
|
| - spinLockLock(&PartitionRootBase::gInitializedLock);
|
| - if (!PartitionRootBase::gInitialized) {
|
| - PartitionRootBase::gInitialized = true;
|
| - // We mark the seed page as free to make sure it is skipped by our
|
| - // logic to find a new active page.
|
| - PartitionRootBase::gPagedBucket.activePagesHead = &PartitionRootGeneric::gSeedPage;
|
| + {
|
| + SpinLock::Guard guard(PartitionRootBase::gInitializedLock);
|
| + if (!PartitionRootBase::gInitialized) {
|
| + PartitionRootBase::gInitialized = true;
|
| + // We mark the seed page as free to make sure it is skipped by our
|
| + // logic to find a new active page.
|
| + PartitionRootBase::gPagedBucket.activePagesHead = &PartitionRootGeneric::gSeedPage;
|
| + }
|
| }
|
| - spinLockUnlock(&PartitionRootBase::gInitializedLock);
|
|
|
| root->initialized = true;
|
| root->totalSizeOfCommittedPages = 0;
|
| @@ -166,7 +166,7 @@ void partitionAllocInit(PartitionRoot* root, size_t numBuckets, size_t maxAlloca
|
|
|
| void partitionAllocGenericInit(PartitionRootGeneric* root)
|
| {
|
| - spinLockLock(&root->lock);
|
| + SpinLock::Guard guard(root->lock);
|
|
|
| partitionAllocBaseInit(root);
|
|
|
| @@ -243,8 +243,6 @@ void partitionAllocGenericInit(PartitionRootGeneric* root)
|
| // And there's one last bucket lookup that will be hit for e.g. malloc(-1),
|
| // which tries to overflow to a non-existent order.
|
| *bucketPtr = &PartitionRootGeneric::gPagedBucket;
|
| -
|
| - spinLockUnlock(&root->lock);
|
| }
|
|
|
| static bool partitionAllocShutdownBucket(PartitionBucket* bucket)
|
| @@ -293,7 +291,7 @@ bool partitionAllocShutdown(PartitionRoot* root)
|
|
|
| bool partitionAllocGenericShutdown(PartitionRootGeneric* root)
|
| {
|
| - spinLockLock(&root->lock);
|
| + SpinLock::Guard guard(root->lock);
|
| bool foundLeak = false;
|
| size_t i;
|
| for (i = 0; i < kGenericNumBuckets; ++i) {
|
| @@ -301,7 +299,6 @@ bool partitionAllocGenericShutdown(PartitionRootGeneric* root)
|
| foundLeak |= partitionAllocShutdownBucket(bucket);
|
| }
|
| foundLeak |= partitionAllocBaseShutdown(root);
|
| - spinLockUnlock(&root->lock);
|
| return !foundLeak;
|
| }
|
|
|
| @@ -1241,7 +1238,7 @@ void partitionPurgeMemory(PartitionRoot* root, int flags)
|
|
|
| void partitionPurgeMemoryGeneric(PartitionRootGeneric* root, int flags)
|
| {
|
| - spinLockLock(&root->lock);
|
| + SpinLock::Guard guard(root->lock);
|
| if (flags & PartitionPurgeDecommitEmptyPages)
|
| partitionDecommitEmptyPages(root);
|
| if (flags & PartitionPurgeDiscardUnusedSystemPages) {
|
| @@ -1251,7 +1248,6 @@ void partitionPurgeMemoryGeneric(PartitionRootGeneric* root, int flags)
|
| partitionPurgeBucket(bucket);
|
| }
|
| }
|
| - spinLockUnlock(&root->lock);
|
| }
|
|
|
| static void partitionDumpPageStats(PartitionBucketMemoryStats* statsOut, const PartitionPage* page)
|
| @@ -1329,29 +1325,29 @@ void partitionDumpStatsGeneric(PartitionRootGeneric* partition, const char* part
|
| uint32_t directMapLengths[kMaxReportableDirectMaps];
|
| size_t numDirectMappedAllocations = 0;
|
|
|
| - spinLockLock(&partition->lock);
|
| + {
|
| + SpinLock::Guard guard(partition->lock);
|
|
|
| - for (size_t i = 0; i < kGenericNumBuckets; ++i) {
|
| - const PartitionBucket* bucket = &partition->buckets[i];
|
| - // Don't report the pseudo buckets that the generic allocator sets up in
|
| - // order to preserve a fast size->bucket map (see
|
| - // partitionAllocGenericInit for details).
|
| - if (!bucket->activePagesHead)
|
| - bucketStats[i].isValid = false;
|
| - else
|
| - partitionDumpBucketStats(&bucketStats[i], bucket);
|
| - }
|
| + for (size_t i = 0; i < kGenericNumBuckets; ++i) {
|
| + const PartitionBucket* bucket = &partition->buckets[i];
|
| + // Don't report the pseudo buckets that the generic allocator sets up in
|
| + // order to preserve a fast size->bucket map (see
|
| + // partitionAllocGenericInit for details).
|
| + if (!bucket->activePagesHead)
|
| + bucketStats[i].isValid = false;
|
| + else
|
| + partitionDumpBucketStats(&bucketStats[i], bucket);
|
| + }
|
|
|
| - for (PartitionDirectMapExtent* extent = partition->directMapList; extent; extent = extent->nextExtent) {
|
| - ASSERT(!extent->nextExtent || extent->nextExtent->prevExtent == extent);
|
| - directMapLengths[numDirectMappedAllocations] = extent->bucket->slotSize;
|
| - ++numDirectMappedAllocations;
|
| - if (numDirectMappedAllocations == kMaxReportableDirectMaps)
|
| - break;
|
| + for (PartitionDirectMapExtent* extent = partition->directMapList; extent; extent = extent->nextExtent) {
|
| + ASSERT(!extent->nextExtent || extent->nextExtent->prevExtent == extent);
|
| + directMapLengths[numDirectMappedAllocations] = extent->bucket->slotSize;
|
| + ++numDirectMappedAllocations;
|
| + if (numDirectMappedAllocations == kMaxReportableDirectMaps)
|
| + break;
|
| + }
|
| }
|
|
|
| - spinLockUnlock(&partition->lock);
|
| -
|
| // partitionsDumpBucketStats is called after collecting stats because it
|
| // can try to allocate using PartitionAllocGeneric and it can't obtain the
|
| // lock.
|
|
|