Index: third_party/WebKit/Source/wtf/PartitionAlloc.cpp
diff --git a/third_party/WebKit/Source/wtf/PartitionAlloc.cpp b/third_party/WebKit/Source/wtf/PartitionAlloc.cpp
index a71dd0053b92d34dbe75649ed3be8605548749e8..8e6cdd9f4be257ac5b09fe91a7a32e5fe1482061 100644
--- a/third_party/WebKit/Source/wtf/PartitionAlloc.cpp
+++ b/third_party/WebKit/Source/wtf/PartitionAlloc.cpp
@@ -55,7 +55,7 @@
 
 namespace WTF {
 
-SpinLock PartitionRootBase::gInitializedLock;
+int PartitionRootBase::gInitializedLock = 0;
 bool PartitionRootBase::gInitialized = false;
 PartitionPage PartitionRootBase::gSeedPage;
 PartitionBucket PartitionRootBase::gPagedBucket;
@@ -104,15 +104,15 @@
 static void partitionAllocBaseInit(PartitionRootBase* root)
 {
     ASSERT(!root->initialized);
-    {
-        SpinLock::Guard guard(PartitionRootBase::gInitializedLock);
-        if (!PartitionRootBase::gInitialized) {
-            PartitionRootBase::gInitialized = true;
-            // We mark the seed page as free to make sure it is skipped by our
-            // logic to find a new active page.
-            PartitionRootBase::gPagedBucket.activePagesHead = &PartitionRootGeneric::gSeedPage;
-        }
-    }
+
+    spinLockLock(&PartitionRootBase::gInitializedLock);
+    if (!PartitionRootBase::gInitialized) {
+        PartitionRootBase::gInitialized = true;
+        // We mark the seed page as free to make sure it is skipped by our
+        // logic to find a new active page.
+        PartitionRootBase::gPagedBucket.activePagesHead = &PartitionRootGeneric::gSeedPage;
+    }
+    spinLockUnlock(&PartitionRootBase::gInitializedLock);
 
     root->initialized = true;
     root->totalSizeOfCommittedPages = 0;
@@ -166,7 +166,7 @@
 
 void partitionAllocGenericInit(PartitionRootGeneric* root)
 {
-    SpinLock::Guard guard(root->lock);
+    spinLockLock(&root->lock);
 
     partitionAllocBaseInit(root);
 
@@ -243,6 +243,8 @@
     // And there's one last bucket lookup that will be hit for e.g. malloc(-1),
     // which tries to overflow to a non-existant order.
     *bucketPtr = &PartitionRootGeneric::gPagedBucket;
+
+    spinLockUnlock(&root->lock);
 }
 
 static bool partitionAllocShutdownBucket(PartitionBucket* bucket)
@@ -291,7 +293,7 @@
 
 bool partitionAllocGenericShutdown(PartitionRootGeneric* root)
 {
-    SpinLock::Guard guard(root->lock);
+    spinLockLock(&root->lock);
     bool foundLeak = false;
     size_t i;
     for (i = 0; i < kGenericNumBuckets; ++i) {
@@ -299,6 +301,7 @@
         foundLeak |= partitionAllocShutdownBucket(bucket);
     }
     foundLeak |= partitionAllocBaseShutdown(root);
+    spinLockUnlock(&root->lock);
     return !foundLeak;
 }
 
@@ -1238,7 +1241,7 @@
 
 void partitionPurgeMemoryGeneric(PartitionRootGeneric* root, int flags)
 {
-    SpinLock::Guard guard(root->lock);
+    spinLockLock(&root->lock);
     if (flags & PartitionPurgeDecommitEmptyPages)
         partitionDecommitEmptyPages(root);
     if (flags & PartitionPurgeDiscardUnusedSystemPages) {
@@ -1248,6 +1251,7 @@
             partitionPurgeBucket(bucket);
         }
     }
+    spinLockUnlock(&root->lock);
 }
 
 static void partitionDumpPageStats(PartitionBucketMemoryStats* statsOut, const PartitionPage* page)
@@ -1325,28 +1329,28 @@
     uint32_t directMapLengths[kMaxReportableDirectMaps];
     size_t numDirectMappedAllocations = 0;
 
-    {
-        SpinLock::Guard guard(partition->lock);
-
-        for (size_t i = 0; i < kGenericNumBuckets; ++i) {
-            const PartitionBucket* bucket = &partition->buckets[i];
-            // Don't report the pseudo buckets that the generic allocator sets up in
-            // order to preserve a fast size->bucket map (see
-            // partitionAllocGenericInit for details).
-            if (!bucket->activePagesHead)
-                bucketStats[i].isValid = false;
-            else
-                partitionDumpBucketStats(&bucketStats[i], bucket);
-        }
-
-        for (PartitionDirectMapExtent* extent = partition->directMapList; extent; extent = extent->nextExtent) {
-            ASSERT(!extent->nextExtent || extent->nextExtent->prevExtent == extent);
-            directMapLengths[numDirectMappedAllocations] = extent->bucket->slotSize;
-            ++numDirectMappedAllocations;
-            if (numDirectMappedAllocations == kMaxReportableDirectMaps)
-                break;
-        }
-    }
+    spinLockLock(&partition->lock);
+
+    for (size_t i = 0; i < kGenericNumBuckets; ++i) {
+        const PartitionBucket* bucket = &partition->buckets[i];
+        // Don't report the pseudo buckets that the generic allocator sets up in
+        // order to preserve a fast size->bucket map (see
+        // partitionAllocGenericInit for details).
+        if (!bucket->activePagesHead)
+            bucketStats[i].isValid = false;
+        else
+            partitionDumpBucketStats(&bucketStats[i], bucket);
+    }
+
+    for (PartitionDirectMapExtent* extent = partition->directMapList; extent; extent = extent->nextExtent) {
+        ASSERT(!extent->nextExtent || extent->nextExtent->prevExtent == extent);
+        directMapLengths[numDirectMappedAllocations] = extent->bucket->slotSize;
+        ++numDirectMappedAllocations;
+        if (numDirectMappedAllocations == kMaxReportableDirectMaps)
+            break;
+    }
+
+    spinLockUnlock(&partition->lock);
 
     // partitionsDumpBucketStats is called after collecting stats because it
     // can try to allocate using PartitionAllocGeneric and it can't obtain the
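For reference, here is a minimal sketch of the spinLockLock/spinLockUnlock primitives the patched code calls. The patch only shows the call sites operating on a plain int lock word, so everything below is an assumption: a basic test-and-set spin lock written with GCC/Clang __atomic builtins and an x86 _mm_pause, not the actual wtf/SpinLock.h implementation.

    // spinlock_sketch.h -- hypothetical illustration, not taken from the patch.
    #include <immintrin.h> // _mm_pause; x86-specific, substitute a platform yield elsewhere

    static inline void spinLockLock(int* lock)
    {
        // Atomically swap the lock word from 0 to 1. Acquire ordering ensures
        // reads/writes in the critical section cannot be reordered before the
        // lock is observed as held.
        while (__atomic_exchange_n(lock, 1, __ATOMIC_ACQUIRE)) {
            // Lock already held: spin on cheap relaxed loads until it looks
            // free, then retry the exchange above. This avoids hammering the
            // cache line with atomic read-modify-writes.
            while (__atomic_load_n(lock, __ATOMIC_RELAXED))
                _mm_pause();
        }
    }

    static inline void spinLockUnlock(int* lock)
    {
        // Release store: publishes the critical section's writes before the
        // next acquirer can enter.
        __atomic_store_n(lock, 0, __ATOMIC_RELEASE);
    }

    // Usage mirrors the patched call sites:
    //     static int gLock = 0;
    //     spinLockLock(&gLock);
    //     /* ... critical section ... */
    //     spinLockUnlock(&gLock);

Note the trade-off this change accepts: SpinLock::Guard released the lock automatically at scope exit, whereas explicit spinLockLock/spinLockUnlock pairs require every exit path to unlock by hand. That is why each hunk above adds a matching spinLockUnlock before the function's return or closing brace, and why the lock taken in partitionAllocGenericInit is released at the end of the same function in the -243 hunk.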
|