OLD | NEW |
1 /* | 1 /* |
2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
6 * met: | 6 * met: |
7 * | 7 * |
8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
(...skipping 291 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
302 char* nextSuperPage; | 302 char* nextSuperPage; |
303 char* nextPartitionPage; | 303 char* nextPartitionPage; |
304 char* nextPartitionPageEnd; | 304 char* nextPartitionPageEnd; |
305 PartitionSuperPageExtentEntry* currentExtent; | 305 PartitionSuperPageExtentEntry* currentExtent; |
306 PartitionSuperPageExtentEntry* firstExtent; | 306 PartitionSuperPageExtentEntry* firstExtent; |
307 PartitionDirectMapExtent* directMapList; | 307 PartitionDirectMapExtent* directMapList; |
308 PartitionPage* globalEmptyPageRing[kMaxFreeableSpans]; | 308 PartitionPage* globalEmptyPageRing[kMaxFreeableSpans]; |
309 int16_t globalEmptyPageRingIndex; | 309 int16_t globalEmptyPageRingIndex; |
310 uintptr_t invertedSelf; | 310 uintptr_t invertedSelf; |
311 | 311 |
312 static int gInitializedLock; | 312 static SpinLock gInitializedLock; |
313 static bool gInitialized; | 313 static bool gInitialized; |
314 // gSeedPage is used as a sentinel to indicate that there is no page | 314 // gSeedPage is used as a sentinel to indicate that there is no page |
315 // in the active page list. We can use nullptr, but in that case we need | 315 // in the active page list. We can use nullptr, but in that case we need |
316 // to add a null-check branch to the hot allocation path. We want to avoid | 316 // to add a null-check branch to the hot allocation path. We want to avoid |
317 // that. | 317 // that. |
318 static PartitionPage gSeedPage; | 318 static PartitionPage gSeedPage; |
319 static PartitionBucket gPagedBucket; | 319 static PartitionBucket gPagedBucket; |
320 // gOomHandlingFunction is invoked when PartitionAlloc hits OutOfMemory. | 320 // gOomHandlingFunction is invoked when PartitionAlloc hits OutOfMemory. |
321 static void (*gOomHandlingFunction)(); | 321 static void (*gOomHandlingFunction)(); |
322 }; | 322 }; |
323 | 323 |
324 // Never instantiate a PartitionRoot directly, instead use PartitionAlloc. | 324 // Never instantiate a PartitionRoot directly, instead use PartitionAlloc. |
325 struct PartitionRoot : public PartitionRootBase { | 325 struct PartitionRoot : public PartitionRootBase { |
326 // The PartitionAlloc templated class ensures the following is correct. | 326 // The PartitionAlloc templated class ensures the following is correct. |
327 ALWAYS_INLINE PartitionBucket* buckets() { return reinterpret_cast<Partition
Bucket*>(this + 1); } | 327 ALWAYS_INLINE PartitionBucket* buckets() { return reinterpret_cast<Partition
Bucket*>(this + 1); } |
328 ALWAYS_INLINE const PartitionBucket* buckets() const { return reinterpret_ca
st<const PartitionBucket*>(this + 1); } | 328 ALWAYS_INLINE const PartitionBucket* buckets() const { return reinterpret_ca
st<const PartitionBucket*>(this + 1); } |
329 }; | 329 }; |
330 | 330 |
331 // Never instantiate a PartitionRootGeneric directly, instead use PartitionAlloc
atorGeneric. | 331 // Never instantiate a PartitionRootGeneric directly, instead use PartitionAlloc
atorGeneric. |
332 struct PartitionRootGeneric : public PartitionRootBase { | 332 struct PartitionRootGeneric : public PartitionRootBase { |
333 int lock; | 333 SpinLock lock; |
334 // Some pre-computed constants. | 334 // Some pre-computed constants. |
335 size_t orderIndexShifts[kBitsPerSizet + 1]; | 335 size_t orderIndexShifts[kBitsPerSizet + 1]; |
336 size_t orderSubIndexMasks[kBitsPerSizet + 1]; | 336 size_t orderSubIndexMasks[kBitsPerSizet + 1]; |
337 // The bucket lookup table lets us map a size_t to a bucket quickly. | 337 // The bucket lookup table lets us map a size_t to a bucket quickly. |
338 // The trailing +1 caters for the overflow case for very large allocation si
zes. | 338 // The trailing +1 caters for the overflow case for very large allocation si
zes. |
339 // It is one flat array instead of a 2D array because in the 2D world, we'd | 339 // It is one flat array instead of a 2D array because in the 2D world, we'd |
340 // need to index array[blah][max+1] which risks undefined behavior. | 340 // need to index array[blah][max+1] which risks undefined behavior. |
341 PartitionBucket* bucketLookups[((kBitsPerSizet + 1) * kGenericNumBucketsPerO
rder) + 1]; | 341 PartitionBucket* bucketLookups[((kBitsPerSizet + 1) * kGenericNumBucketsPerO
rder) + 1]; |
342 PartitionBucket buckets[kGenericNumBuckets]; | 342 PartitionBucket buckets[kGenericNumBuckets]; |
343 }; | 343 }; |
(...skipping 401 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
745 { | 745 { |
746 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) | 746 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) |
747 void* result = malloc(size); | 747 void* result = malloc(size); |
748 RELEASE_ASSERT(result); | 748 RELEASE_ASSERT(result); |
749 return result; | 749 return result; |
750 #else | 750 #else |
751 ASSERT(root->initialized); | 751 ASSERT(root->initialized); |
752 size_t requestedSize = size; | 752 size_t requestedSize = size; |
753 size = partitionCookieSizeAdjustAdd(size); | 753 size = partitionCookieSizeAdjustAdd(size); |
754 PartitionBucket* bucket = partitionGenericSizeToBucket(root, size); | 754 PartitionBucket* bucket = partitionGenericSizeToBucket(root, size); |
755 spinLockLock(&root->lock); | 755 void* ret = nullptr; |
756 // TODO(bashi): Remove following RELEASE_ASSERT()s once we find the cause of | 756 { |
757 // http://crbug.com/514141 | 757 SpinLock::Guard guard(root->lock); |
| 758 // TODO(bashi): Remove following RELEASE_ASSERT()s once we find the cause
of |
| 759 // http://crbug.com/514141 |
758 #if OS(ANDROID) | 760 #if OS(ANDROID) |
759 RELEASE_ASSERT(bucket >= &root->buckets[0] || bucket == &PartitionRootGeneri
c::gPagedBucket); | 761 RELEASE_ASSERT(bucket >= &root->buckets[0] || bucket == &PartitionRootGe
neric::gPagedBucket); |
760 RELEASE_ASSERT(bucket <= &root->buckets[kGenericNumBuckets - 1] || bucket ==
&PartitionRootGeneric::gPagedBucket); | 762 RELEASE_ASSERT(bucket <= &root->buckets[kGenericNumBuckets - 1] || bucke
t == &PartitionRootGeneric::gPagedBucket); |
761 RELEASE_ASSERT(root->initialized); | 763 RELEASE_ASSERT(root->initialized); |
762 #endif | 764 #endif |
763 void* ret = partitionBucketAlloc(root, flags, size, bucket); | 765 ret = partitionBucketAlloc(root, flags, size, bucket); |
764 spinLockUnlock(&root->lock); | 766 } |
765 PartitionAllocHooks::allocationHookIfEnabled(ret, requestedSize, typeName); | 767 PartitionAllocHooks::allocationHookIfEnabled(ret, requestedSize, typeName); |
766 return ret; | 768 return ret; |
767 #endif | 769 #endif |
768 } | 770 } |
769 | 771 |
770 ALWAYS_INLINE void* partitionAllocGeneric(PartitionRootGeneric* root, size_t siz
e, const char* typeName) | 772 ALWAYS_INLINE void* partitionAllocGeneric(PartitionRootGeneric* root, size_t siz
e, const char* typeName) |
771 { | 773 { |
772 return partitionAllocGenericFlags(root, 0, size, typeName); | 774 return partitionAllocGenericFlags(root, 0, size, typeName); |
773 } | 775 } |
774 | 776 |
775 ALWAYS_INLINE void partitionFreeGeneric(PartitionRootGeneric* root, void* ptr) | 777 ALWAYS_INLINE void partitionFreeGeneric(PartitionRootGeneric* root, void* ptr) |
776 { | 778 { |
777 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) | 779 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) |
778 free(ptr); | 780 free(ptr); |
779 #else | 781 #else |
780 ASSERT(root->initialized); | 782 ASSERT(root->initialized); |
781 | 783 |
782 if (UNLIKELY(!ptr)) | 784 if (UNLIKELY(!ptr)) |
783 return; | 785 return; |
784 | 786 |
785 PartitionAllocHooks::freeHookIfEnabled(ptr); | 787 PartitionAllocHooks::freeHookIfEnabled(ptr); |
786 ptr = partitionCookieFreePointerAdjust(ptr); | 788 ptr = partitionCookieFreePointerAdjust(ptr); |
787 ASSERT(partitionPointerIsValid(ptr)); | 789 ASSERT(partitionPointerIsValid(ptr)); |
788 PartitionPage* page = partitionPointerToPage(ptr); | 790 PartitionPage* page = partitionPointerToPage(ptr); |
789 spinLockLock(&root->lock); | 791 { |
790 partitionFreeWithPage(ptr, page); | 792 SpinLock::Guard guard(root->lock); |
791 spinLockUnlock(&root->lock); | 793 partitionFreeWithPage(ptr, page); |
| 794 } |
792 #endif | 795 #endif |
793 } | 796 } |
794 | 797 |
795 ALWAYS_INLINE size_t partitionDirectMapSize(size_t size) | 798 ALWAYS_INLINE size_t partitionDirectMapSize(size_t size) |
796 { | 799 { |
797 // Caller must check that the size is not above the kGenericMaxDirectMapped | 800 // Caller must check that the size is not above the kGenericMaxDirectMapped |
798 // limit before calling. This also guards against integer overflow in the | 801 // limit before calling. This also guards against integer overflow in the |
799 // calculation here. | 802 // calculation here. |
800 ASSERT(size <= kGenericMaxDirectMapped); | 803 ASSERT(size <= kGenericMaxDirectMapped); |
801 return (size + kSystemPageOffsetMask) & kSystemPageBaseMask; | 804 return (size + kSystemPageOffsetMask) & kSystemPageBaseMask; |
(...skipping 75 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
877 using WTF::partitionAlloc; | 880 using WTF::partitionAlloc; |
878 using WTF::partitionFree; | 881 using WTF::partitionFree; |
879 using WTF::partitionAllocGeneric; | 882 using WTF::partitionAllocGeneric; |
880 using WTF::partitionFreeGeneric; | 883 using WTF::partitionFreeGeneric; |
881 using WTF::partitionReallocGeneric; | 884 using WTF::partitionReallocGeneric; |
882 using WTF::partitionAllocActualSize; | 885 using WTF::partitionAllocActualSize; |
883 using WTF::partitionAllocSupportsGetSize; | 886 using WTF::partitionAllocSupportsGetSize; |
884 using WTF::partitionAllocGetSize; | 887 using WTF::partitionAllocGetSize; |
885 | 888 |
886 #endif // WTF_PartitionAlloc_h | 889 #endif // WTF_PartitionAlloc_h |
OLD | NEW |