| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 697 matching lines...) |
| 708 virtual void prepareHeapForTermination() = 0; | 708 virtual void prepareHeapForTermination() = 0; |
| 709 }; | 709 }; |
| 710 | 710 |
| 711 template<typename Header> | 711 template<typename Header> |
| 712 class FreeList { | 712 class FreeList { |
| 713 public: | 713 public: |
| 714 FreeList(); | 714 FreeList(); |
| 715 | 715 |
| 716 void addToFreeList(Address, size_t); | 716 void addToFreeList(Address, size_t); |
| 717 void clear(); | 717 void clear(); |
| 718 FreeListEntry* takeEntry(size_t allocationSize); | |
| 719 | 718 |
| 720 // Returns a bucket number for inserting a FreeListEntry of a given size. | 719 // Returns a bucket number for inserting a FreeListEntry of a given size. |
| 721 // All FreeListEntries in the given bucket, n, have size >= 2^n. | 720 // All FreeListEntries in the given bucket, n, have size >= 2^n. |
| 722 static int bucketIndexForSize(size_t); | 721 static int bucketIndexForSize(size_t); |
| 723 | 722 |
| 724 private: | 723 private: |
| 725 int m_biggestFreeListIndex; | 724 int m_biggestFreeListIndex; |
| 726 | 725 |
| 727 // All FreeListEntries in the nth list have size >= 2^n. | 726 // All FreeListEntries in the nth list have size >= 2^n. |
| 728 FreeListEntry* m_freeLists[blinkPageSizeLog2]; | 727 FreeListEntry* m_freeLists[blinkPageSizeLog2]; |
| 729 | 728 |
| 730 #if ENABLE(ASSERT) | |
| 731 friend class ThreadHeap<Header>; | 729 friend class ThreadHeap<Header>; |
| 732 #endif | |
| 733 }; | 730 }; |
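
The bucket index described in the comment above is just the position of the size's most significant set bit, so every entry stored in bucket n has size >= 2^n. A minimal sketch of that mapping, assuming only non-zero sizes (the real bucketIndexForSize lives in Heap.cpp and may differ in detail):

    #include <cassert>
    #include <cstddef>

    // Illustrative stand-in for FreeList<Header>::bucketIndexForSize:
    // returns floor(log2(size)), i.e. the index of the highest set bit.
    static int exampleBucketIndexForSize(size_t size)
    {
        assert(size > 0);
        int index = -1;
        while (size) {
            size >>= 1;
            ++index;
        }
        return index;
    }

    // exampleBucketIndexForSize(1) == 0, (7) == 2, (8) == 3, (4096) == 12,
    // matching the invariant that bucket n only holds entries of size >= 2^n.
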
| 734 | 731 |
| 735 // Thread heaps represent a part of the per-thread Blink heap. | 732 // Thread heaps represent a part of the per-thread Blink heap. |
| 736 // | 733 // |
| 737 // Each Blink thread has a number of thread heaps: one general heap | 734 // Each Blink thread has a number of thread heaps: one general heap |
| 738 // that contains any type of object and a number of heaps specialized | 735 // that contains any type of object and a number of heaps specialized |
| 739 // for specific object types (such as Node). | 736 // for specific object types (such as Node). |
| 740 // | 737 // |
| 741 // Each thread heap contains the functionality to allocate new objects | 738 // Each thread heap contains the functionality to allocate new objects |
| 742 // (potentially adding new pages to the heap), to find and mark | 739 // (potentially adding new pages to the heap), to find and mark |
| (...skipping 28 matching lines...) |
| 771 | 768 |
| 772 ThreadState* threadState() { return m_threadState; } | 769 ThreadState* threadState() { return m_threadState; } |
| 773 | 770 |
| 774 void addToFreeList(Address address, size_t size) | 771 void addToFreeList(Address address, size_t size) |
| 775 { | 772 { |
| 776 ASSERT(pageFromAddress(address)); | 773 ASSERT(pageFromAddress(address)); |
| 777 ASSERT(pageFromAddress(address + size - 1)); | 774 ASSERT(pageFromAddress(address + size - 1)); |
| 778 m_freeList.addToFreeList(address, size); | 775 m_freeList.addToFreeList(address, size); |
| 779 } | 776 } |
| 780 | 777 |
| 781 inline Address allocate(size_t, const GCInfo*); | 778 inline Address allocate(size_t payloadSize, const GCInfo*); |
| 782 inline static size_t roundedAllocationSize(size_t size) | 779 inline static size_t roundedAllocationSize(size_t size) |
| 783 { | 780 { |
| 784 return allocationSizeFromSize(size) - sizeof(Header); | 781 return allocationSizeFromSize(size) - sizeof(Header); |
| 785 } | 782 } |
| 786 | 783 |
| 787 virtual void prepareHeapForTermination() override; | 784 virtual void prepareHeapForTermination() override; |
| 788 | 785 |
| 789 void removePageFromHeap(HeapPage<Header>*); | 786 void removePageFromHeap(HeapPage<Header>*); |
| 790 | 787 |
| 791 PLATFORM_EXPORT void promptlyFreeObject(Header*); | 788 PLATFORM_EXPORT void promptlyFreeObject(Header*); |
| 792 PLATFORM_EXPORT bool expandObject(Header*, size_t); | 789 PLATFORM_EXPORT bool expandObject(Header*, size_t); |
| 793 void shrinkObject(Header*, size_t); | 790 void shrinkObject(Header*, size_t); |
| 794 | 791 |
| 795 private: | 792 private: |
| 796 void addPageToHeap(const GCInfo*); | 793 void addPageToHeap(const GCInfo*); |
| 797 PLATFORM_EXPORT Address outOfLineAllocate(size_t payloadSize, size_t allocationSize, const GCInfo*); | 794 PLATFORM_EXPORT Address outOfLineAllocate(size_t allocationSize, const GCInfo*); |
| 798 static size_t allocationSizeFromSize(size_t); | 795 static size_t allocationSizeFromSize(size_t); |
| 799 PLATFORM_EXPORT Address allocateLargeObject(size_t, const GCInfo*); | 796 PLATFORM_EXPORT Address allocateLargeObject(size_t, const GCInfo*); |
| 800 Address currentAllocationPoint() const { return m_currentAllocationPoint; } | 797 Address currentAllocationPoint() const { return m_currentAllocationPoint; } |
| 801 size_t remainingAllocationSize() const { return m_remainingAllocationSize; } | 798 size_t remainingAllocationSize() const { return m_remainingAllocationSize; } |
| 802 bool hasCurrentAllocationArea() const { return currentAllocationPoint() && remainingAllocationSize(); } | 799 bool hasCurrentAllocationArea() const { return currentAllocationPoint() && remainingAllocationSize(); } |
| 803 void setAllocationPoint(Address point, size_t size) | 800 void setAllocationPoint(Address point, size_t size) |
| 804 { | 801 { |
| 805 ASSERT(!point || pageFromAddress(point)); | 802 ASSERT(!point || pageFromAddress(point)); |
| 806 ASSERT(size <= HeapPage<Header>::payloadSize()); | 803 ASSERT(size <= HeapPage<Header>::payloadSize()); |
| 807 if (hasCurrentAllocationArea()) | 804 if (hasCurrentAllocationArea()) |
| 808 addToFreeList(currentAllocationPoint(), remainingAllocationSize()); | 805 addToFreeList(currentAllocationPoint(), remainingAllocationSize()); |
| 809 updateRemainingAllocationSize(); | 806 updateRemainingAllocationSize(); |
| 810 m_currentAllocationPoint = point; | 807 m_currentAllocationPoint = point; |
| 811 m_lastRemainingAllocationSize = m_remainingAllocationSize = size; | 808 m_lastRemainingAllocationSize = m_remainingAllocationSize = size; |
| 812 } | 809 } |
| 813 void updateRemainingAllocationSize(); | 810 void updateRemainingAllocationSize(); |
| 814 bool allocateFromFreeList(size_t); | 811 Address allocateFromFreeList(size_t, const GCInfo*); |
| 815 | 812 |
| 816 void freeLargeObject(LargeObject<Header>*, LargeObject<Header>**); | 813 void freeLargeObject(LargeObject<Header>*, LargeObject<Header>**); |
| 817 void allocatePage(const GCInfo*); | 814 void allocatePage(const GCInfo*); |
| 818 | 815 |
| 816 inline Address allocateSize(size_t allocationSize, const GCInfo*); |
| 817 inline Address allocateInto(Address, size_t allocationSize, const GCInfo*); |
| 818 |
| 819 #if ENABLE(ASSERT) | 819 #if ENABLE(ASSERT) |
| 820 bool pagesToBeSweptContains(Address); | 820 bool pagesToBeSweptContains(Address); |
| 821 bool pagesAllocatedDuringSweepingContains(Address); | 821 bool pagesAllocatedDuringSweepingContains(Address); |
| 822 #endif | 822 #endif |
| 823 | 823 |
| 824 void sweepNormalPages(); | 824 void sweepNormalPages(); |
| 825 void sweepLargePages(); | 825 void sweepLargePages(); |
| 826 bool coalesce(size_t); | 826 bool coalesce(size_t); |
| 827 | 827 |
| 828 Address allocateFromFreeListEntry(FreeListEntry*, size_t, const GCInfo*); |
| 829 |
| 828 Address m_currentAllocationPoint; | 830 Address m_currentAllocationPoint; |
| 829 size_t m_remainingAllocationSize; | 831 size_t m_remainingAllocationSize; |
| 830 size_t m_lastRemainingAllocationSize; | 832 size_t m_lastRemainingAllocationSize; |
| 831 | 833 |
| 832 HeapPage<Header>* m_firstPage; | 834 HeapPage<Header>* m_firstPage; |
| 833 LargeObject<Header>* m_firstLargeObject; | 835 LargeObject<Header>* m_firstLargeObject; |
| 834 | 836 |
| 835 HeapPage<Header>* m_firstPageAllocatedDuringSweeping; | 837 HeapPage<Header>* m_firstPageAllocatedDuringSweeping; |
| 836 HeapPage<Header>* m_lastPageAllocatedDuringSweeping; | 838 HeapPage<Header>* m_lastPageAllocatedDuringSweeping; |
| 837 | 839 |
| (...skipping 429 matching lines...) |
| 1267 // therefore has to happen before any calculation on the size. | 1269 // therefore has to happen before any calculation on the size. |
| 1268 RELEASE_ASSERT(size < maxHeapObjectSize); | 1270 RELEASE_ASSERT(size < maxHeapObjectSize); |
| 1269 | 1271 |
| 1270 // Add space for header. | 1272 // Add space for header. |
| 1271 size_t allocationSize = size + sizeof(Header); | 1273 size_t allocationSize = size + sizeof(Header); |
| 1272 // Align size with allocation granularity. | 1274 // Align size with allocation granularity. |
| 1273 allocationSize = (allocationSize + allocationMask) & ~allocationMask; | 1275 allocationSize = (allocationSize + allocationMask) & ~allocationMask; |
| 1274 return allocationSize; | 1276 return allocationSize; |
| 1275 } | 1277 } |
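
As a worked example of the size rounding above, assuming an 8-byte header and an 8-byte allocation granularity (so allocationMask == 7; the real constants are defined elsewhere in this header and may differ):

    // Hypothetical constants for illustration only.
    const size_t kHeaderSize = 8;      // stand-in for sizeof(Header)
    const size_t kAllocationMask = 7;  // granularity of 8 bytes, minus one

    size_t size = 20;                            // requested payload size
    size_t allocationSize = size + kHeaderSize;  // 28 bytes including the header
    allocationSize = (allocationSize + kAllocationMask) & ~kAllocationMask;
    // allocationSize is now 32, the next multiple of the 8-byte granularity.
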
| 1276 | 1278 |
| 1279 |
| 1280 template<typename Header> |
| 1281 inline Address ThreadHeap<Header>::allocateInto(Address headerAddress, size_t allocationSize, const GCInfo* gcInfo) |
| 1282 { |
| 1283 new (NotNull, headerAddress) Header(allocationSize, gcInfo); |
| 1284 Address result = headerAddress + sizeof(Header); |
| 1285 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); |
| 1286 |
| 1287 // Unpoison the memory used for the object (payload). |
| 1288 ASAN_UNPOISON_MEMORY_REGION(result, allocationSize - sizeof(Header)); |
| 1289 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) |
| 1290 memset(result, 0, allocationSize - sizeof(Header)); |
| 1291 #endif |
| 1292 ASSERT(pageFromAddress(headerAddress + allocationSize - 1)); |
| 1293 return result; |
| 1294 } |
| 1295 |
| 1296 template<typename Header> |
| 1297 Address ThreadHeap<Header>::allocateSize(size_t allocationSize, const GCInfo* gcInfo) |
| 1298 { |
| 1299 if (LIKELY(allocationSize <= m_remainingAllocationSize)) { |
| 1300 Address headerAddress = m_currentAllocationPoint; |
| 1301 m_currentAllocationPoint += allocationSize; |
| 1302 m_remainingAllocationSize -= allocationSize; |
| 1303 return allocateInto(headerAddress, allocationSize, gcInfo); |
| 1304 } |
| 1305 return outOfLineAllocate(allocationSize, gcInfo); |
| 1306 } |
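
The split above is a standard bump-pointer fast path: while the current allocation area has room, allocation is just a pointer increment, and only an exhausted area falls through to outOfLineAllocate. A self-contained sketch of the same idea, with illustrative names rather than the Blink API:

    #include <cstddef>
    #include <cstdlib>

    // Minimal bump-pointer allocator mirroring the fast/slow split above.
    struct BumpArea {
        char* current = nullptr;  // plays the role of m_currentAllocationPoint
        size_t remaining = 0;     // plays the role of m_remainingAllocationSize

        void* allocate(size_t allocationSize)
        {
            if (allocationSize <= remaining) {    // fast path: just bump
                char* result = current;
                current += allocationSize;
                remaining -= allocationSize;
                return result;
            }
            return allocateSlow(allocationSize);  // slow path: refill the area
        }

        void* allocateSlow(size_t allocationSize)
        {
            // Stand-in for outOfLineAllocate: grab a fresh area and retry.
            size_t areaSize = allocationSize > 4096 ? allocationSize : 4096;
            current = static_cast<char*>(std::malloc(areaSize));
            remaining = areaSize;
            return allocate(allocationSize);
        }
    };
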
| 1307 |
| 1277 template<typename Header> | 1308 template<typename Header> |
| 1278 Address ThreadHeap<Header>::allocate(size_t size, const GCInfo* gcInfo) | 1309 Address ThreadHeap<Header>::allocate(size_t size, const GCInfo* gcInfo) |
| 1279 { | 1310 { |
| 1280 size_t allocationSize = allocationSizeFromSize(size); | 1311 return allocateSize(allocationSizeFromSize(size), gcInfo); |
| 1281 if (LIKELY(allocationSize <= m_remainingAllocationSize)) { | |
| 1282 Address headerAddress = m_currentAllocationPoint; | |
| 1283 m_currentAllocationPoint += allocationSize; | |
| 1284 m_remainingAllocationSize -= allocationSize; | |
| 1285 new (NotNull, headerAddress) Header(allocationSize, gcInfo); | |
| 1286 Address result = headerAddress + sizeof(Header); | |
| 1287 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); | |
| 1288 | |
| 1289 // Unpoison the memory used for the object (payload). | |
| 1290 ASAN_UNPOISON_MEMORY_REGION(result, allocationSize - sizeof(Header)); | |
| 1291 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) | |
| 1292 memset(result, 0, allocationSize - sizeof(Header)); | |
| 1293 #endif | |
| 1294 ASSERT(pageFromAddress(headerAddress + allocationSize - 1)); | |
| 1295 return result; | |
| 1296 } | |
| 1297 return outOfLineAllocate(size, allocationSize, gcInfo); | |
| 1298 } | 1312 } |
| 1299 | 1313 |
| 1300 template<typename T, typename HeapTraits> | 1314 template<typename T, typename HeapTraits> |
| 1301 Address Heap::allocate(size_t size) | 1315 Address Heap::allocate(size_t size) |
| 1302 { | 1316 { |
| 1303 ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state(); | 1317 ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state(); |
| 1304 ASSERT(state->isAllocationAllowed()); | 1318 ASSERT(state->isAllocationAllowed()); |
| 1305 const GCInfo* gcInfo = GCInfoTrait<T>::get(); | 1319 const GCInfo* gcInfo = GCInfoTrait<T>::get(); |
| 1306 int heapIndex = HeapTraits::index(gcInfo->hasFinalizer(), size); | 1320 int heapIndex = HeapTraits::index(gcInfo->hasFinalizer(), size); |
| 1307 BaseHeap* heap = state->heap(heapIndex); | 1321 BaseHeap* heap = state->heap(heapIndex); |
| (...skipping 1022 matching lines...) |
| 2330 template<typename T, size_t inlineCapacity> | 2344 template<typename T, size_t inlineCapacity> |
| 2331 struct GCInfoTrait<HeapVector<T, inlineCapacity>> : public GCInfoTrait<Vector<T, inlineCapacity, HeapAllocator>> { }; | 2345 struct GCInfoTrait<HeapVector<T, inlineCapacity>> : public GCInfoTrait<Vector<T, inlineCapacity, HeapAllocator>> { }; |
| 2332 template<typename T, size_t inlineCapacity> | 2346 template<typename T, size_t inlineCapacity> |
| 2333 struct GCInfoTrait<HeapDeque<T, inlineCapacity>> : public GCInfoTrait<Deque<T, inlineCapacity, HeapAllocator>> { }; | 2347 struct GCInfoTrait<HeapDeque<T, inlineCapacity>> : public GCInfoTrait<Deque<T, inlineCapacity, HeapAllocator>> { }; |
| 2334 template<typename T, typename U, typename V> | 2348 template<typename T, typename U, typename V> |
| 2335 struct GCInfoTrait<HeapHashCountedSet<T, U, V>> : public GCInfoTrait<HashCountedSet<T, U, V, HeapAllocator>> { }; | 2349 struct GCInfoTrait<HeapHashCountedSet<T, U, V>> : public GCInfoTrait<HashCountedSet<T, U, V, HeapAllocator>> { }; |
| 2336 | 2350 |
| 2337 } // namespace blink | 2351 } // namespace blink |
| 2338 | 2352 |
| 2339 #endif // Heap_h | 2353 #endif // Heap_h |